Dataset schema (field name, type, value range):

    query            stringlengths   9 – 3.4k
    document         stringlengths   9 – 87.4k
    metadata         dict
    negatives        listlengths     4 – 101
    negative_scores  listlengths     4 – 101
    document_score   stringlengths   3 – 10
    document_rank    stringclasses   102 values
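The records below follow this schema. As a minimal sketch, one might load and inspect a record with the Hugging Face `datasets` library; the dataset path "org/code-search-triplets" here is a placeholder, not the real repository name:

# Minimal inspection sketch. The dataset path is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("org/code-search-triplets", split="train")  # hypothetical name
row = ds[0]

print(row["query"])                # natural-language description of the code
print(row["document"][:200])       # the matching (positive) code snippet
print(len(row["negatives"]))       # 4-101 non-matching snippets per query
print(row["negative_scores"][:5])  # similarity scores, parallel to negatives
print(row["document_score"], row["document_rank"])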
query: generates initial hidden states for each agent
document:

def generate_initial_hidden_states(self, batch_size, test_mode=False, caller=None):
    # Set up hidden states for all levels - and propagate through the runner!
    hidden_dict = {}
    hidden_dict["level1"] = th.stack(
        [Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size))
         for _ in range(self.n_agents
                        if self.is_obs_noise(test_mode) and caller != "learner"
                        else 1)])
    hidden_dict["level2"] = th.stack(
        [Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size))
         for _ in range(len(sorted(combinations(list(range(self.n_agents)), 2))) * 2
                        if self.is_obs_noise(test_mode) and caller != "learner"
                        else len(sorted(combinations(list(range(self.n_agents)), 2))))])
    hidden_dict["level3"] = th.stack(
        [Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size))
         for _ in range(self.n_agents)])
    if self.args.use_cuda:
        hidden_dict = {_k: _v.cuda() for _k, _v in hidden_dict.items()}
    return hidden_dict, "?*bs*v*t"
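A small check of the level-2 bookkeeping above: the number of hidden states equals the number of unordered agent pairs, doubled when per-agent observation noise is active (values here are illustrative):

from itertools import combinations

n_agents = 3
pairs = sorted(combinations(range(n_agents), 2))  # [(0, 1), (0, 2), (1, 2)]
print(len(pairs))      # 3 level-2 states without observation noise
print(len(pairs) * 2)  # 6 level-2 states with observation noise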
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()", "def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()", "def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()", "def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. \n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.opponents = self.getOpponents(gameState)\n self.distributions = []\n self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]\n print self.legalPositions\n\n #initializing beleif distribution of opponents\n for i in range(0, gameState.getNumAgents()):\n if i in self.opponents:\n beliefs = util.Counter()\n for p in self.legalPositions: beliefs[p] = 1.0\n beliefs.normalize()\n self.distributions.append(beliefs)\n else:\n self.distributions.append(None)\n\n\n ''' \n Your initialization code goes here, if you need any.\n '''", "def init_hidden(self):\n # TODO ========================\n # initialize the hidden states to zero\n\n initial_hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n return initial_hidden # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)", "def _init_episode(self):\n # get states - one-hots\n self._states = np.zeros((self._size_state, self._size_state))\n\n # to_ones = np.random.permutation(self._size_state)[0:3]\n for x in xrange(self._size_state):\n # self._states[x][to_ones[x]] = 1\n self._states[x][x] = 1\n\n self._prob_transition = np.array([[.8,.2]])\n self._randomize()\n self._current_state = 0\n self._last_state = 0\n self._stage = 0\n self._since_flipped = 0", "def initial_state(self):\n # Network details elided.\n return self._agent.initial_state()", "def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n rnnten = initializer(shape=(self.batch, self.units))\n return rnnten", "def fetch_initial_states(self):\n for agent_id, agent_obj in self.__registered_agents.items():\n # given the agent's capabilities, get everything the agent can perceive\n state = self.__get_agent_state(agent_obj)\n\n # filter other things from the agent state\n filtered_agent_state = agent_obj.filter_observations(state)\n\n # save the current agent's state for the API\n api.add_state(agent_id=agent_id, state=filtered_agent_state,\n agent_inheritence_chain=agent_obj.class_inheritance,\n world_settings=api.MATRX_info)\n\n # add god state\n api.add_state(agent_id=\"god\", state=self.__get_complete_state(), agent_inheritence_chain=\"god\",\n world_settings=api.MATRX_info)\n\n # initialize the message manager\n self.message_manager.agents = self.__registered_agents.keys()\n self.message_manager.teams = self.__teams\n\n # make the information of this tick available via the API, after all\n # agents have been updated\n api.next_tick()", "def reset(self):\r\n \r\n self.done = False\r\n self.t = 0\r\n self.episode = random.choice(episodes)\r\n\r\n # initiate agent\r\n self.agent = self.create_agent(Agent)\r\n \r\n # initiate state at time zero\r\n self.state = (self.episode[self.t]['ST Relative Indicator'], \r\n self.episode[self.t]['ST Relative Indicator'], \r\n self.agent.stock,\r\n self.t)\r\n \r\n return self.state", "def setup_initial_state(self):\n # collect 
the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. 
This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)", "def init_hidden(self, batch_size):\r\n \r\n self.hidden_state = (\r\n torch.zeros(((1+self.bidirectional)*self.num_layers,\r\n batch_size,\r\n self.hidden_size)).to(self.device),\r\n torch.zeros(((1+self.bidirectional)*self.num_layers, \r\n batch_size, \r\n self.hidden_size)).to(self.device))", "def init_states(self):\n self.filtered_state_means = None\n self.filtered_state_covariances = None\n self.predicted_state_means = None\n self.predicted_state_covariances = None\n self.smoothed_state_means = None\n self.smoothed_state_covariances = None", "def init_hidden_state(self, batch_size):\n hidden_state = tf.tile(self.initial_hidden_state[None, ...], [batch_size, 1])\n cell_state = tf.tile(self.initial_cell_state[None, ...], [batch_size, 1])\n return hidden_state, cell_state", "def generate_initial_states(env, max_steps=10000):\n\n initial_state, _ = env.reset()\n\n n_steps = 0\n seen_states = set([initial_state])\n frontier = [initial_state]\n while frontier and n_steps < max_steps:\n state = frontier.pop()\n valid_actions = sorted(list(env.action_space.all_ground_literals(state)))\n for action in valid_actions:\n env.set_state(state)\n next_state = env.step(action)[0]\n n_steps += 1\n if next_state not in seen_states:\n seen_states.add(next_state)\n frontier.append(next_state)\n if n_steps >= max_steps:\n break\n\n seen_states.remove(initial_state)\n # Sort states using the One True Ordering\n states = sorted(list(seen_states), key=lambda x: sorted(list(x.literals)))\n old_rng_st = random.getstate()\n random.seed(0)\n random.shuffle(states)\n random.setstate(old_rng_st)\n\n return states", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def 
init_hidden_state(self, encoder_out: torch.Tensor):\n pass", "def make_alternative_states(self) -> np.ndarray:\n states = []\n for agent in range(self.agents):\n agent_state = []\n\n # Own distance\n r, c = self.game.get_agent_pos(agent)\n agent_state.append(r / 6)\n agent_state.append(c / 6)\n\n # Distances to others\n distances_r = [\n (r - pos[0]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n distances_c = [\n (c - pos[1]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n agent_state += distances_r\n agent_state += distances_c\n\n # Goal distances\n distances_goal_r = [(r - pos[0]) / 12 for pos in self.payoff_fields]\n distances_goal_c = [(c - pos[1]) / 12 for pos in self.payoff_fields]\n agent_state += distances_goal_r\n agent_state += distances_goal_c\n\n if agent < self.num_informed:\n agent_state.append((r - self.special_payoff_fields[0][0]) / 12)\n agent_state.append((c - self.special_payoff_fields[0][1]) / 12)\n else:\n agent_state += [0, 0]\n agent_state.append(self.max_turns - self.turns_count)\n states.append(np.array(agent_state))\n\n states = np.stack(states, axis=0)\n return states", "def init_states(batch_size, num_lstm_layer, num_hidden):\n init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n return init_c + init_h", "def initialize_state(self):\n super(InverseChain, self).initialize_state()", "def initial_states(self):\n return list(self.iter_initial_states())", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # NOTE: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, etc.\n # A simple way to implement the model is to have a dictionary of dictionaries, \n # mapping each state to a dictionary which maps actions to (reward, next state) tuples.\n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {} # model is a dictionary of dictionaries, which maps states to actions to \n # (reward, next_state) tuples", "def states_initial(self):\n return self.states(\"Initial = YES\")", "def make_initial_state(self):\n return {\n 'h_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'h_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32))\n }", "def initial_state():\n\treturn [[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY]]", "def _reset(self):\r\n \r\n airgym.reset()\r\n self.stepN = 
0\r\n self.episodeN += 1\r\n \r\n self.allLogs = { 'reward': [0] }\r\n self.allLogs['distance'] = [221]\r\n self.allLogs['action'] = [1]\r\n \r\n print(\"\")\r\n \r\n #self.sensors = airgym.getSensorStates()\r\n \r\n # Initial state\r\n self.state = airgym.getScreenDepthVis()\r\n \r\n \r\n return self.state", "def init_hidden_state(self,batch_size):\n h = torch.zeros(batch_size,self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size,self.decoder_dim).to(device)\n return h, c", "def initialize_hidden_state(self):\n return tf.zeros(shape=(self.batch_size, self.enc_units))", "def reset(self, *agents):\n # initialize the state to [0, 0, ..., 0] (length D+1) + [1, 1]\n for i in range(len(agents)):\n D_state = np.hstack((np.zeros(shape=(agents[i].D + 1)), [1, 1]))\n if i == 0:\n self.state = D_state\n else:\n self.state = np.hstack((self.state, D_state))\n\n self.k = 1\n\n # price\n self.S = np.zeros(shape=(self.N,))\n self.S[ind(self.k)] = self.initial_market_price\n self.S_tilde = np.zeros(shape=(self.N,))\n self.S_tilde[ind(self.k)] = self.initial_market_price\n\n for agent in agents:\n agent.reset()\n\n return self.state", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return 
[[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initializeStates(n):\n states = []\n for i in range(n):\n states.append(0)\n return states", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # Note: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n self.kappa = agent_info.get(\"kappa\", 0.001)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, tau, etc.\n # The visitation-counts can be stored as a table as well, like the action values \n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.tau = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {}", "def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]", "def generate_all_states(self):\n self.clingo = ClingoBridge() # reset clingo\n\n base = ('base', '')\n self.clingo.add_file('initial-states.lp')\n self.clingo.run([base])\n output = self.clingo.output\n\n num_states = int(len(output) / 2)\n\n states = np.full(num_states, object)\n for i in range(0, num_states):\n state_atoms = []\n for atom in output[i]:\n if atom.name == 'state':\n state_atoms.append(atom)\n states[i] = self.parse_state(state_atoms)\n return states", "def initialize_state(self):\n accepted = False\n while not accepted:\n self.state = self.net.sample(self.evidence)\n accepted = self.net.log_probability(self.state) != utils.LOG_PROB_0", "def initVariable(self, state):\n self.nb_agent = state.getNumAgents()\n self.first_call = False", "def init_hidden(self):\n pass", "def init_hidden(self):\n pass", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return 
(torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def registerInitialState(self, gameState):\r\n \r\n '''\r\n Make sure you do not delete the following line. If you would like to\r\n use Manhattan distances instead of maze distances in order to save\r\n on initialization time, please take a look at\r\n CaptureAgent.registerInitialState in captureAgents.py.\r\n '''\r\n CaptureAgent.registerInitialState(self, gameState)\r\n \r\n \r\n self.teamMates = []\r\n for mate in self.getTeam(gameState):\r\n if mate is not self.index:\r\n self.teamMates.append(mate)\r\n \r\n def getSuccessors(walls, state):\r\n successors = []\r\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\r\n x,y = state\r\n dx, dy = Actions.directionToVector(action)\r\n nextx, nexty = int(x + dx), int(y + dy)\r\n if not walls[nextx][nexty]:\r\n nextState = (nextx, nexty)\r\n cost = 1\r\n successors.append( ( nextState, action, cost) )\r\n return successors\r\n \r\n \r\n \r\n class o0State:\r\n def __init__(self, pos, node = None):\r\n self.pos = pos\r\n self.node = node\r\n self.deadEndDepth = 0.0\r\n self.successors = {}\r\n self.successorsByNodePos = {}\r\n def isDeadEndNode(self):\r\n if self.node is None:\r\n return False\r\n noneDeadEndCount = 0\r\n for successor in self.successors.values():\r\n if not successor.isDeadEnd:\r\n noneDeadEndCount += 1\r\n return noneDeadEndCount is 1\r\n class o0Node:\r\n def __init__(self, pos):\r\n self.pos = pos\r\n self.isDeadEnd = False\r\n class o0Successor:\r\n def __init__(self, direction, nextPos, nextNodePos = None):\r\n self.direction = direction\r\n self.nextPos = nextPos\r\n self.nextNodePos = nextNodePos\r\n self.isDeadEnd = False\r\n\r\n class o0PathMap:\r\n def __init__(self, gameState):\r\n #print 'init pathMap'\r\n walls = gameState.getWalls()\r\n positions = walls.asList(False)\r\n self.states = {}\r\n self.nodes = {}\r\n for pos in positions:\r\n self.states[pos] = o0State(pos)\r\n for successor in getSuccessors(walls,pos):\r\n self.states[pos].successors[successor[1]] = o0Successor(successor[1],successor[0])\r\n successorCount = len(self.states[pos].successors)\r\n if successorCount is not 2:\r\n node = o0Node(pos)\r\n self.nodes[pos] = node\r\n self.states[pos].node = node\r\n \r\n def connectNode(node):\r\n for nodeSuccessor in self.states[node.pos].successors.values():\r\n if nodeSuccessor.nextNodePos is None:\r\n forwardSuccessors = [nodeSuccessor]\r\n backwardSuccessors = []\r\n previousPos = node.pos\r\n currentPos = nodeSuccessor.nextPos\r\n while currentPos not in self.nodes.keys():\r\n #print node.pos\r\n #print currentPos\r\n if len(self.states[currentPos].successors) is not 2:\r\n print 'not a path'\r\n for successor in self.states[currentPos].successors.values():\r\n #print successor.nextPos\r\n if successor.nextPos[0] is previousPos[0] and successor.nextPos[1] is previousPos[1]:\r\n backwardSuccessors.append(successor)\r\n else:\r\n forwardSuccessors.append(successor)\r\n previousPos = currentPos\r\n currentPos = forwardSuccessors[len(forwardSuccessors) - 1].nextPos\r\n for successor in self.states[currentPos].successors.values():\r\n if successor.nextPos is previousPos:\r\n backwardSuccessors.append(successor)\r\n \r\n for successor in forwardSuccessors:\r\n successor.nextNodePos = currentPos\r\n for successor in backwardSuccessors:\r\n successor.nextNodePos = node.pos\r\n \r\n 
#connectNode(self.nodes.values()[0])\r\n #connectNode(self.nodes.values()[1])\r\n #connectNode(self.nodes.values()[2])\r\n #connectNode(self.nodes.values()[3])\r\n #connectNode(self.nodes.values()[4])\r\n #connectNode(self.nodes.values()[5])\r\n \r\n for node in self.nodes.values():\r\n connectNode(node)#'''\r\n for state in self.states.values():\r\n for successor in self.states[state.pos].successors.values():\r\n self.states[state.pos].successorsByNodePos[successor.nextNodePos] = successor\r\n \r\n updatedNodes = self.nodes.values()\r\n while(len(updatedNodes) is not 0):\r\n nodePool = updatedNodes\r\n updatedNodes = []\r\n for node in nodePool:\r\n if self.states[node.pos].isDeadEndNode():\r\n self.nodes[node.pos].isDeadEnd = True\r\n for successor in self.states[node.pos].successors.values():\r\n self.states[successor.nextNodePos].successorsByNodePos[node.pos].isDeadEnd = True\r\n updatedNodes.append(self.states[successor.nextNodePos])\r\n \r\n #node.isDeadEnd = self.states[node.pos].isDeadEndNode()#'''\r\n \r\n '''\r\n for node in self.nodes.values():\r\n if self.states[node.pos].isDeadEndNode():\r\n node.isDeadEnd = True#'''\r\n \r\n deadEndNodes = {}\r\n noneDeadEndNodes = {}\r\n for node in self.nodes.values():\r\n if not node.isDeadEnd:\r\n noneDeadEndNodes[node.pos] = node\r\n else:\r\n deadEndNodes[node.pos] = node\r\n \r\n for node in deadEndNodes.values():#\r\n actions = breadthFirstSearch(AnyTargetSearchProblem(gameState,noneDeadEndNodes.keys(),node.pos))\r\n nodeConnectedTo = self.nodes[performActions(node.pos, actions)] \r\n actions = reverseActions(actions)\r\n pos = nodeConnectedTo.pos\r\n deadEndDepth = 0.0\r\n for action in actions:\r\n pos = performActions(pos,[action])\r\n deadEndDepth += 1.0\r\n self.states[pos].deadEndDepth = deadEndDepth\r\n def willDie(self, position, distance, scaredTime = 0):#distance from our agent to closest enemy\r\n deadEndDepth = self.states[position].deadEndDepth\r\n if deadEndDepth >= distance - deadEndDepth and deadEndDepth >= scaredTime:\r\n return True\r\n return False\r\n def isDeadEnd(self, position):\r\n return self.states[position].deadEndDepth >= 0.5\r\n #def getAllStatesInDeadEnd(self, anyState):\r\n \r\n\r\n global pathMap\r\n if pathMap is None:\r\n pathMap = o0PathMap(gameState)\r\n self.pathMap = pathMap\r\n targets[self.index] = None\r\n global lastEattenFoodAreDefendingPos\r\n lastEattenFoodAreDefendingPos = None \r\n global totalFood\r\n totalFood = len(self.getFood(gameState).asList())\r\n global leftFood\r\n leftFood = totalFood\r\n #self.debugDraw(pathMap.deadEndNodes.keys(),[1,0,0])\r\n #self.debugDraw(pathMap.nodes.keys(),[0,1,0])\r\n \r\n global pathMapDebugMode\r\n if pathMapDebugMode:\r\n for state in self.pathMap.states.values():\r\n deadEndColor = 0.3 + state.deadEndDepth * 0.1\r\n if deadEndColor>1.0:\r\n deadEndColor = 1.0\r\n if state.deadEndDepth == 0:\r\n deadEndColor = 0.0\r\n \r\n nodeColor = 0.0\r\n if state.node is not None:\r\n nodeColor = 0.5\r\n self.debugDraw(state.pos,[deadEndColor,0,0])\r\n\r\n self.curryFoodScore = 0.8\r\n \r\n \r\n \r\n global defenseWall\r\n global defensePositions\r\n if len(defenseWall) is 0:\r\n foods = self.getFoodYouAreDefending(gameState)\r\n for capsule in self.getCapsulesYouAreDefending(gameState):\r\n foods[capsule[0]][capsule[1]] = True\r\n defenseWall = actionsToPositions((0,0), aStarSearch(DefenseSearchProblem(gameState, foods, self.index),nullHeuristic))\r\n defensePositions = getPositionsNeededToDefense(gameState)\r\n global defenseWallDebugMode\r\n if 
defenseWallDebugMode is True:\r\n self.debugDraw(defenseWall,[0,0.5,0])\r\n self.debugDraw(defensePositions,[0.5,0,0])\r\n \r\n global agentInDeadEnd\r\n agentInDeadEnd[self.index] = False", "def reset(self):\n \n self.steps = 0\n if self.episode == 0:\n self.ins = random.uniform(self.mins.values[:4],self.maxes.values[:4])\n #get the corresponding outputs:\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n self.starts = np.append(self.ins, outs)\n\n else:\n self.starts = self.state[:7] #previous episode's end state\n\n #get goals from random inputs:\n viable = False\n while viable == False:\n self.ins = random.uniform((self.mins.values[:4]+(self.mins.values[:4]*self.minmaxbuffer)),self.maxes.values[:4]-(self.maxes.values[:4]*self.minmaxbuffer))\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n \n # Check if viable:\n viable = self.test_viable(outs)\n\n self.goals = outs\n\n # These are your current inputs:\n self.ins = self.starts[:4]\n # State carries the starting points and the goals.\n self.state = np.append(self.starts,self.goals)\n\n #Track episodes and total reward.\n self.episode += 1\n self.tot_rew = 0\n\n return (self.state)", "def iter_initial_states(self):\n from six.moves import filter\n return filter(lambda s:s.is_initial, self.iter_states())", "def init_hidden_state(self, encoder_out):\n init_internal_state = []\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out)\n c = self.init_c(mean_encoder_out)\n init_internal_state.append((h, c))\n\n for i in range(1, self.decoder_number_layers):\n init_internal_state.append((\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device),\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device)\n ))\n return init_internal_state", "def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. 
\n '''\n CaptureAgent.registerInitialState(self, gameState)\n ''' \n Your initialization code goes here, if you need any.\n '''\n\n # Store team and enemy indices\n self.teamIndices = self.getTeam(gameState)\n self.enemyIndices = self.getOpponents(gameState)\n\n # Check how recently we were near the enemy to check if we've knocked him out\n self.nearEnemyCounter = 0\n\n # Set up particle filters to track enemy locations\n self.enemyLocFilters = {}\n for i in self.enemyIndices:\n self.enemyLocFilters[i] = (ParticleFilter(gameState, i,\n gameState.getInitialAgentPosition(i)))", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.start = gameState.getAgentPosition(self.index) # starting index of the pacman\n self.numFood = len(self.getFood(gameState).asList()) # the amount of food that has not been returned\n self.hasFood = False\n self.offensiveIndex = self.getTeam(gameState)[0] # agent index of the offensive agent\n\n self.depth = 2\n '''\n Your initialization code goes here, if you need any.\n '''", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self, batch_size):\n return torch.zeros(()), torch.zeros(())", "def initial_states(self):\n return self._initial_states", "def transform_state(state):\n # TODO: automate n_enemies calculation -> only valid fot n_enemies = n_friends\n n_agents = len(state.agents)\n n_enemies = n_agents // 2 # TODO: improve this\n states_v = torch.zeros(n_agents, 5 + n_enemies) # 5 = x, y, alive, ammo, aim, enemy visible ? 
(x n_enemies)\n for agent_idx, agent in enumerate(state.agents):\n states_v[agent_idx, 0] = state.position[agent][0] # x\n states_v[agent_idx, 1] = state.position[agent][1] # y\n states_v[agent_idx, 2] = state.alive[agent]\n states_v[agent_idx, 3] = state.ammo[agent] / 5 # args.ammo\n states_v[agent_idx, 4] = -1 if state.aim[agent] is None else state.aim[agent].id\n idx = 5\n for other in state.agents:\n if (agent, other) in state.visible:\n states_v[agent_idx, idx] = int(state.visible[(agent, other)])\n idx += 1\n return states_v", "def get_initial_states(self):\n raise NotImplementedError()", "def get_states():\n # Getting all hidden state through time\n all_hidden_states = tf.scan(GRU, processed_input, \n initializer=initial_hidden, name='states')\n return all_hidden_states", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def __init__(self, meta_agent, agent_idx):\n self.state_size = meta_agent.state_size\n self.action_size = meta_agent.action_size\n self.num_agents=meta_agent.num_agents\n self.seed = random.seed(meta_agent.random_seed)\n self.agent_idx=agent_idx\n self.epsilon=EPSILON\n print('meta_agent.state_size, action_size,num_agents,seed_agent_idx',meta_agent.state_size,meta_agent.action_size,meta_agent.num_agents,meta_agent.random_seed,agent_idx)\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(self.state_size, self.action_size, meta_agent.random_seed).to(device)\n self.actor_target = Actor(self.state_size, self.action_size, meta_agent.random_seed).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n self.hard_copy(self.actor_target,self.actor_local)\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(self.state_size*self.num_agents, self.action_size*self.num_agents, meta_agent.random_seed).to(device)\n self.critic_target = Critic(self.state_size*self.num_agents, self.action_size*self.num_agents, meta_agent.random_seed).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) \n self.hard_copy(self.critic_target,self.critic_local)\n print('Agent:',self.agent_idx,'\\n Actor-Critic \\n',self.actor_local,self.critic_local)\n \n # Noise process\n self.noise = OUNoise(self.action_size, meta_agent.random_seed)", "def registerInitialState(self, gameState):\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n self.startpos=gameState.getAgentPosition(self.index)\n CaptureAgent.registerInitialState(self, gameState)\n self.midwidth = gameState.data.layout.width / 2\n self.carryfoods = 0\n self.foodnum = len(self.getFood(gameState).asList())\n self.foods = self.getFood(gameState).asList()\n self.hisdefendfoods = self.getFoodYouAreDefending(gameState).asList()\n self.height = gameState.data.layout.height\n self.hispos = None\n initmap = InitMap(self,gameState)\n self.safefoodlist,self.dangerfoodlist = initmap.gainlist()\n self.deadends = initmap.gaindeadends() \n self.indanger = False\n '''\n Your initialization code goes here, if you need any.\n '''", "def registerInitialState(self, gameState):\n \n '''\n Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n MultiAgentSearchAgent.registerInitialState(self, gameState)\n\n self.setDefaultWeights()\n self.historicalActions = ['Go']\n \n '''\n Your initialization code goes here, if you need any.\n '''", "def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state", "def initial_state(particle,self):\n\n self.states[particle,:] = self.base_model.agents2state()\n\n return self.states[particle]", "def __init__(self):\n self.action_space = [(0,0)] + list(permutations([i for i in range(m)], 2))\n self.state_space = [(X,T,D) for X in range(m) for T in range(t) for D in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def get_initial_hx(self, input_seq, hidden_state):\n num_directions = 2 if self.lstm.bidirectional else 1\n # hidden state\n hidden = hidden_state.view(self.lstm.num_layers * num_directions, len(hidden_state), -1)\n # cell state\n c_zeros = torch.zeros(self.lstm.num_layers * num_directions,\n input_seq.size(0), self.lstm.hidden_size,\n dtype=input_seq.dtype, device=input_seq.device)\n return hidden, c_zeros", "def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])", "def state_reset():\n feat_self = get_self_feat(model, data['img'][0])\n feat_diff = torch.zeros_like(feat_self)\n feat_FAR = torch.tensor([0.]).cuda()\n feat_history = torch.zeros([10]).cuda()\n\n return [feat_diff, feat_self, feat_FAR, feat_history]", "def create_data(self, episodes, agents, batch_index):\n\n\t\tstate_dict = {}\n\t\talpha = 0.95\n\n\t\t# iterate over episodes\n\t\tfor e in range(episodes):\n\t\n\t\t\t# choose 2 random agents to play\n\t\t\tperm = np.random.permutation(len(agents))\n\n\t\t\tagent1 = agents[perm[0]]\n\t\t\tagent2 = agents[perm[1]]\n\n\t\t\tagent1.player_index = 1\n\t\t\tagent2.player_index = 2\n\t\t\tplayers = [agent1, agent2]\n\n\t\t\tprint('Episode {0}/{1} : {2} vs {3}'.format(\n\t\t\t\te + 1, episodes, agent1.name, agent2.name), flush=True)\n\n\t\t\t# initialize the state\n\t\t\tcurrent_state = State()\n\t\t\tlast_state = None\n\n\t\t\t# discarding boolean\n\t\t\tdiscard = False\n\n\t\t\t# keep track of the states\n\t\t\tstates_track_1 = []\n\t\t\tstates_track_2 = []\n\n\t\t\t# final rewards variables\n\t\t\toutcome_1 = 0\n\t\t\toutcome_2 = 0\n\n\t\t\t# start the episode\n\t\t\twhile True:\n\n\t\t\t\t# ending condition\n\t\t\t\tif current_state == None:\n\t\t\t\t\twindex = last_state.winner\n\t\t\t\t\t# print(self.state_descriptor(last_state, 1))\n\t\t\t\t\t# 
print(self.state_descriptor(last_state, 2))\n\t\t\t\t\t# print(windex)\n\n\t\t\t\t\tif windex == 1:\n\t\t\t\t\t\toutcome_1 = 1\n\t\t\t\t\t\toutcome_2 = -1\n\n\t\t\t\t\tif windex == 2:\n\t\t\t\t\t\toutcome_1 = -1\n\t\t\t\t\t\toutcome_2 = 1\n\n\t\t\t\t\tbreak\n\n\t\t\t\t# next move\n\t\t\t\telse:\n\n\t\t\t\t\t# verify for infinite loops\n\t\t\t\t\tif states_track_1.count(self.state_descriptor(current_state, 1)) > 2:\n\t\t\t\t\t\tdiscard = True\n\t\t\t\t\t\t# print('Loop')\n\t\t\t\t\t\t# print(self.state_descriptor(current_state, 1))\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tstates_track_1.append(self.state_descriptor(current_state, 1))\n\t\t\t\t\tstates_track_2.append(self.state_descriptor(current_state, 2))\n\n\t\t\t\t\tto_move = current_state.player_to_move - 1\n\t\t\t\t\tlast_state = current_state\n\t\t\t\t\tcurrent_state = players[to_move].make_move(current_state)\n\n\t\t\t# get rid of games that ended up in infinite loop\n\t\t\tif discard:\n\t\t\t\tcontinue\n\n\t\t\t# update states from the end to the beginning\n\t\t\tstates_track_1.reverse()\n\t\t\tstates_track_2.reverse()\n\n\t\t\t# update the counter and value for each state\n\t\t\tfor i in range(len(states_track_1)):\n\n\t\t\t\t# update for the first player\n\t\t\t\ttry:\n\t\t\t\t\tstate_dict[states_track_1[i]][0] += 1\n\t\t\t\t\tstate_dict[states_track_1[i]][1] += (alpha ** i) * outcome_1\n\t\t\t\texcept KeyError:\n\t\t\t\t\tstate_dict[states_track_1[i]] = [1, (alpha ** i) * outcome_1]\n\n\t\t\t\t# update for the second player\n\t\t\t\ttry:\n\t\t\t\t\tstate_dict[states_track_2[i]][0] += 1\n\t\t\t\t\tstate_dict[states_track_2[i]][1] += (alpha ** i) * outcome_2\n\t\t\t\texcept KeyError:\n\t\t\t\t\tstate_dict[states_track_2[i]] = [1, (alpha ** i) * outcome_2]\n\n\n\t\t\tif (e + 1) % 25 == 0 or e + 1 == episodes:\n\t\t\t\tprint('Saving data...', flush=True)\n\n\t\t\t\twith open('reinforcement_learning_data/states_file_' + str(batch_index) + '.txt', 'w') as f:\n\n\t\t\t\t\taugmentations = 6\n\n\t\t\t\t\tboard_data = np.empty((augmentations * len(state_dict),\n\t\t\t\t\t\t\t\t\t\t\tState.BOARD_SIZE,\n\t\t\t\t\t\t\t\t\t\t\tState.BOARD_SIZE,\n\t\t\t\t\t\t\t\t\t\t\tState.BOARD_SIZE), dtype=np.float32)\n\n\t\t\t\t\tcows_data = np.empty((augmentations * len(state_dict),\n\t\t\t\t\t\t\t\t\t\t 2), dtype=np.float32)\n\n\t\t\t\t\tlabels = np.empty((augmentations * len(state_dict),), dtype=np.float32)\n\n\t\t\t\t\tcounter = 0\n\t\t\t\t\tfor key, value in state_dict.items():\n\t\t\t\t\t\t\n\t\t\t\t\t\tf.write('{0} : {1}\\n'.format(key, value))\n\n\t\t\t\t\t\t# mirror board\n\t\t\t\t\t\tfor fl in range(1, 3):\n\t\t\t\t\t\t\tboard_data[counter] = np.flip(np.asarray(key[0]), axis=fl)\n\t\t\t\t\t\t\tcows_data[counter] = np.asarray([key[1], key[2]])\n\t\t\t\t\t\t\tlabels[counter] = value[1] / value[0]\n\n\t\t\t\t\t\t\tcounter += 1\n\n\t\t\t\t\t\t# rotate board\n\t\t\t\t\t\tfor rot in range(4):\n\t\t\t\t\t\t\tboard_data[counter] = np.rot90(np.asarray(key[0]), k=rot, axes=(1, 2))\n\t\t\t\t\t\t\tcows_data[counter] = np.asarray([key[1], key[2]])\n\t\t\t\t\t\t\tlabels[counter] = value[1] / value[0]\n\n\t\t\t\t\t\t\tcounter += 1\n\n\t\t\t\t\tboard_data.dump('reinforcement_learning_data/board_data_' + str(batch_index) + '.dat')\n\t\t\t\t\tcows_data.dump('reinforcement_learning_data/cows_data_' + str(batch_index) + '.dat')\n\t\t\t\t\tlabels.dump('reinforcement_learning_data/labels_' + str(batch_index) + '.dat')\n\n\n\t\t\t\t# rotate board\n\t\t\t\tfor r in range(4):\n\t\t\t\t\tboard_data[counter] = np.rot90(np.asarray(key[0]), k=r, axes=(1, 
2))\n\t\t\t\t\tcows_data[counter] = np.asarray([key[1], key[2]])\n\t\t\t\t\tlabels[counter] = value[1] / value[0]", "def make(max_states=10):\n states = range(max_states)\n keys = range(max_states)\n shuffle(keys)\n return {\n 'states': states,\n 'transitions': {\n keys[index]: DummyProgramGenerator._transition(state)\n for index, state in enumerate(states)}}", "def generate_state():\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state", "def __init__(self, init_state):\n\n self.PUZZLE_TYPE = len(init_state) - 1\n self.initial_state = init_state\n self.current_state = init_state\n self.goal_state = [i for i in range(0, self.PUZZLE_TYPE + 1)]\n self.explored_states = []", "def make_initial_state(self):\n pass" ]
[ "0.6404829", "0.6404829", "0.6404829", "0.6275053", "0.62659883", "0.62305397", "0.6203651", "0.6194984", "0.61897767", "0.6183285", "0.60839427", "0.6075158", "0.6037105", "0.6035622", "0.60091317", "0.59804654", "0.59778255", "0.59626013", "0.5937987", "0.5926602", "0.59236103", "0.5922435", "0.5919982", "0.59192294", "0.5919091", "0.5906496", "0.5899491", "0.5886812", "0.58807594", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.58801347", "0.5879633", "0.5878311", "0.5848295", "0.5848295", "0.5844127", "0.5826383", "0.5812046", "0.5807226", "0.5807226", "0.5804632", "0.5804632", "0.5796721", "0.578731", "0.5785144", "0.57842904", "0.5779715", "0.57782483", "0.57782483", "0.57782483", "0.5776802", "0.57620865", "0.57620865", "0.5750406", "0.57457983", "0.57409185", "0.57374495", "0.5731881", "0.57164496", "0.57157356", "0.5714208", "0.5707622", "0.57023335", "0.5695078", "0.5661611", "0.5660962", "0.5660962", "0.5660611", "0.56519824", "0.56382763", "0.56364274", "0.5635485", "0.56315625", "0.5621021", "0.5619198" ]

document_score: 0.73379165
document_rank: 0
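Since negative_scores runs parallel to negatives, the two can be zipped to pick out the hardest negatives, as in this sketch (reusing `row` from the loading sketch above; the 0.6 threshold is arbitrary):

hard_negatives = [
    (snippet, float(score))
    for snippet, score in zip(row["negatives"], row["negative_scores"])
    if float(score) > 0.6
]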
query: Each learner has its own logging routine, which logs directly to the Python-wide logger if log_directly==True, and returns a logging string otherwise. Logging is triggered in run.py
document:

def log(self, test_mode=None, T_env=None, log_directly=True):
    test_suffix = "" if not test_mode else "_test"
    stats = self.get_stats()
    try:
        stats["pair_action_unavail_rate" + test_suffix] = \
            _seq_mean(stats["pair_action_unavail_rate__runner" + test_suffix])
        self._add_stat("pair_action_unavail_rate",
                       stats["pair_action_unavail_rate" + test_suffix],
                       T_env=T_env,
                       suffix=test_suffix,
                       to_sacred=True)
    except:
        pass

    if stats == {}:
        self.logging_struct.py_logger.warning("Stats is empty... are you logging too frequently?")
        return "", {}

    logging_dict = dict(T_env=T_env)
    try:
        logging_dict["pair_action_unavail_rate" + test_suffix] = stats["pair_action_unavail_rate" + test_suffix]
    except:
        pass

    logging_str = ""
    logging_str += _make_logging_str(_copy_remove_keys(logging_dict, ["T_env" + test_suffix]))

    if log_directly:
        self.logging_struct.py_logger.info("{} MC INFO: {}".format("TEST" if self.test_mode else "TRAIN", logging_str))

    return logging_str, logging_dict
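The log_directly switch above is the core of the pattern the query describes: the same routine either writes to a process-wide logger or hands the formatted string back to the caller (run.py) for aggregation. A stripped-down, self-contained sketch of that pattern, with illustrative names:

import logging

logger = logging.getLogger("main")

def log_stats(stats, log_directly=True):
    logging_str = ", ".join("{}={:g}".format(k, v) for k, v in stats.items())
    if log_directly:
        logger.info("LEARNER INFO: %s", logging_str)
    return logging_str

# A caller that aggregates instead of logging immediately:
line = log_stats({"policy_loss": 0.12, "critic_loss": 0.40}, log_directly=False)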
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(self, log_directly = True):\n stats = self.get_stats()\n logging_dict = dict(advantage_mean = _seq_mean(stats[\"advantage_mean\"]),\n critic_grad_norm = _seq_mean(stats[\"critic_grad_norm\"]),\n critic_loss =_seq_mean(stats[\"critic_loss\"]),\n policy_grad_norm = _seq_mean(stats[\"policy_grad_norm\"]),\n policy_loss = _seq_mean(stats[\"policy_loss\"]),\n target_critic_mean = _seq_mean(stats[\"target_critic_mean\"]),\n T_critic=self.T_critic,\n T_policy=self.T_policy\n )\n logging_str = \"T_policy={:g}, T_critic={:g}, \".format(logging_dict[\"T_policy\"], logging_dict[\"T_critic\"])\n logging_str += _make_logging_str(_copy_remove_keys(logging_dict, [\"T_policy\", \"T_critic\"]))\n\n if log_directly:\n self.logging_struct.py_logger.info(\"{} LEARNER INFO: {}\".format(self.args.learner.upper(), logging_str))\n\n return logging_str, logging_dict", "def _log_some_info(self):\n logging.info('info')", "def log(self, logstr, *args, **kwargs):\n if self._log_func:\n self._log_func(logstr, *args, **kwargs)\n else:\n print logstr", "def main():\n custom_logger=Custom_log(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)\n custom_logger.logger.info(\"log this\")\n custom_logger.logger.debug(\"this is debbuging message\")\n custom_logger.logger.error(\"oops something bad happened\")\n custom_logger.logger.critical(\"this will break\")\n custom_logger2=Custom_log(logger_name=\"custom_logger2\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=True,file_path=\"logs.log\",file_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_stream_level=logging.INFO)\n custom_logger2.logger.info(\"first log\")\n #custom_logger.print_all(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.INFO,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)", "def _log(self, str):\n if self.log:\n print(str)", "def _get_logger(self):", "def _log(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n a = fn(self, *args, **kwargs)\n self.log.append(fn.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))\n return a\n return wrapper", "def demo_log(self):\n self.logger.debug('This is a debug')\n self.logger.debug(self.name)\n self.logger.debug(self.doc)", "def log(self, logstr: str):\n if self.logflag:\n print(logstr)", "def do_log(self, arg):\n arg = \" %s :custom log\" % (arg)\n log(arg)", "def _log(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n self.log.append(fn.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))\n return fn(self, *args, **kwargs)\n return wrapper", "def logme(logger, *args, **kwargs):\n if logger is not None:\n logger.debug(generate_log(*args, **kwargs))", "def _get_logger(self):\n return Logger(\"Weak Algorithms\")", "def on_L1(self):\r\n self.log()", "def log(self, message):", "def log(self, message: str):", "def logging(self):\r\n return None", "def log(self, *arguments, **kwargs):\n return self.get_output('log', *arguments, **kwargs)", "def log_example(var):\n\n log.info('example code started')\n log.debug('calling settings')\n test_settings()\n log2.error('there is no error this is example ')\n log2.info('finished')", "def __init__(self):\n\n self.log = logger.getLogger(name=\"directord\")", "def enablePyLangLogger(self):", "def 
log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def log_info():\n # Get an instance of a logger\n logging.basicConfig(level=logging.DEBUG)\n return logging.getLogger('general')", "def logger(self):\n pass", "def plain(self, *args):\n self.mylog.log(logging.INFO + 1, *args)", "def logtool(self, action, **options):\n pass", "def _logging(self):\n msgs = []\n # patch to log stdout spawned processes of dataloader\n logger = init_logger()\n for ds_name, ds_count in self._counts.items():\n msgs.append(f\"\\t\\t\\t* {ds_name}: {ds_count}\")\n logger.info(\"Weighted corpora loaded so far:\\n\" + \"\\n\".join(msgs))", "def log(self, message):\n if VERBOSE:\n print self, message", "def setup_logging(config: Any) -> Logger:\n green = \"\\033[32m\"\n reset = \"\\033[0m\"\n logger = setup_logger(\n name=f\"{green}[ignite]{reset}\",\n level=logging.DEBUG if config.debug else logging.INFO,\n format=\"%(name)s: %(message)s\",\n filepath=config.output_dir / \"training-info.log\",\n )\n return logger", "def log():\n return logging.getLogger(__name__)", "def get_logger():\n return PLLogger.GetLogger(\"testintel\")", "def logging(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n res = func(*args, **kwargs)\n print(func.__name__, args, kwargs)\n return res\n return wrapper", "def logger(self, message):\n if hasattr(self.log, '__call__'):\n self.log(message.strip())", "def main():\n logger = setup_logger()\n\n logger.debug('a debug message')\n logger.info('an info message')\n logger.warning('a warning message')\n logger.error('an error message')\n logger.critical('a critical message')", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). 
New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def get_logger():\n global swan_logger\n return swan_logger", "def log(self):\n return self.cpp_analyzer.user_log;", "def get_log_like(self):\n pass", "def get_log_like(self):\n pass", "def logi(*args,**kwargs):\n print(*args,**kwargs)", "def logd(*args,**kwargs):\n print(*args,**kwargs)", "def on_L2(self):\r\n self.log()", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def __init__(self):\n self.logger = logging.getLogger(FeatureEngineeringLogger.__name__)", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def test_default():\n logger = logging.getLogger(__name__)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger", "def log(self, level, msg, *args, **kwargs):\n pass", "def test_log_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step(print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))", "def log(s):\n if VERBOSE_MODE:\n print(s)", "def setup_exp_logging(config, trainer, optimizers, evaluators):\n\n #::: if (it.logger === 'clearml') { :::#\n logger = common.setup_clearml_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'mlflow') { :::#\n logger = common.setup_mlflow_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'neptune') { :::#\n logger = common.setup_neptune_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'polyaxon') { :::#\n logger = common.setup_plx_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'tensorboard') { :::#\n logger = common.setup_tb_logging(\n config.output_dir,\n trainer,\n optimizers,\n evaluators,\n config.log_every_iters,\n )\n #::: } else if (it.logger === 'visdom') { :::#\n logger = common.setup_visdom_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'wandb') { :::#\n logger = common.setup_wandb_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } :::#\n return logger", "def logIt(self, msg):\n\n if (self.logger): self.logger.logIt(msg)", "def setup_logs():\n tf.logging.set_verbosity(FLAGS.log)", "def log(msg):\n print msg", "def log(self) -> DagsterLogManager:\n return self._step_execution_context.log", "def enableCLangLogger(self):", "def logged(meth):\n def wrapper(*args):\n print(\"LOGGING {meth} {args}\".format(**locals()))\n return meth(*args) #self, ... other args\n return wrapper", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def _info(self, func):\n self.logger.info(\"llamando a %s\" % func)", "def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. 
\n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. \n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. \n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return", "def log(self) -> misc_.Logger:\n\t\treturn self._log", "def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')", "def _configure_logging(self):\n pass", "def on_L3(self):\r\n self.log()", "def log(self, _strMessage=\"\"):\n self.edLogging.log(_strMessage)", "def log(self):\r\n return self._log", "def log_main(\n self,\n normal=None,\n info=None,\n warning=None,\n critical=None,\n last_known_id=None,\n **kwargs\n ):\n parameters = {\n \"normal\": normal,\n \"info\": info,\n \"warning\": warning,\n \"critical\": critical,\n \"last_known_id\": last_known_id,\n }\n return self._get(\n _name=APINames.Log, _method=\"main\", params=parameters, **kwargs\n )", "def test_log_library_context(propagate_logs, caplog, logger_name, package_name):\n logger = logging.getLogger(logger_name)\n logger.critical(\"Test!\")\n\n assert (\n caplog.records[-1].package == package_name\n ), \"Missing ray package name in log record.\"", "def logIt(msg):\n utils = CONFIG['utils'].logIt(msg) #@UnusedVariable", "def 
init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')", "def _stdlog(self, msg):\n print msg\n logger.info(msg)", "def log():\n return logging.getLogger(\"vodka\")", "def with_logging(*args, **kwargs):\n print(func.__name__ + \" was called\")\n return func(*args, **kwargs)", "def get_main_log(self) -> Any:\n return self.logger", "def __init__(self):\n self.log = logging.getLogger()", "def getLog(self):\n pass", "def set_logger( logger_fn: Callable[[str,str],any] = lambda llvl, msg: sys.stdout.write( \"[%s]: %s\\n\" % (llvl, msg) ) ):\n global LOGGER\n\n LOGGER = logger_fn", "def log(self, *args, **kwargs):\n self.game_view.log(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n self.logger.info(*args, **kwargs)", "def ThreadAwareLogger(self):\n currentThread=threading.current_thread()\n loggerName=\"%s\"%(currentThread.name)\n if hasattr(self,loggerName): \n return eval(\"self.%s\"%(loggerName))\n if hasattr(self,\"debug\") and hasattr(self.debug,loggerName): # 026 hack - tries to find logger in e.g. NodeProxy. While actually is in debug.\n return eval(\"self.debug.%s\"%(loggerName))\n else:\n if hasattr(self,\"cloneMainLogger\"): # 026 hack (delegated logger tries to find cloneMainLogger in e.g. NodeProxy.\n return self.cloneMainLogger(loggerName)\n else: return self.debug.cloneMainLogger(loggerName)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def log(message):\n if LOGPLEASE:\n logging.info(message)", "def logbase(cls, loggerObj, lvl, messages):\n if uselogger == 0 or loggerObj is None:\n return\n if lvl == 1:\n loggerObj.debug(messages)\n if lvl == 2:\n loggerObj.info(messages)\n if lvl == 3:\n loggerObj.warning(messages)\n if lvl == 4:\n loggerObj.error(messages)", "def _log(self, message):\n pass", "def _start_logging(self):\n raise NotImplementedException()", "def config_logging():\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('deepcomp').setLevel(logging.WARNING)\n logging.getLogger('deepcomp.main').setLevel(logging.INFO)\n logging.getLogger('deepcomp.util.simulation').setLevel(logging.INFO)\n # logging.getLogger('deepcomp.env.entities.user').setLevel(logging.DEBUG)\n # logging.getLogger('deepcomp.env.multi_ue.multi_agent').setLevel(logging.DEBUG)\n logging.getLogger('matplotlib').setLevel(logging.WARNING)\n logging.getLogger('tensorflow').setLevel(logging.ERROR)\n gym.logger.set_level(gym.logger.ERROR)\n # structlog.configure(logger_factory=LoggerFactory())\n structlog.configure(logger_factory=LoggerFactory(),\n processors=[\n structlog.stdlib.filter_by_level,\n FloatRounder(digits=LOG_ROUND_DIGITS, not_fields=['sinr', 'signal', 'interference']),\n structlog.dev.ConsoleRenderer()\n ])", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! 
This is Joey-NMT.\")\n return logger", "def logline(msg):\n print msg", "def setup_logger():\n now = datetime.now()\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.info(f\"Script run on: {now}\")", "def do_handle_log(self, workunit, level, *msg_elements):\r\n pass", "def test_logging():\n assert logger.name == 'wellcomeml.logger'", "def on_a(self):\r\n self.log()", "def init_LogCapture():\n global techl\n global userl\n global techlName\n global userlName\n techlName = 'techl'\n userlName = 'userl'\n techl = logging.getLogger(techlName) # create default logging object\n techl.setLevel(logging.DEBUG)\n userl = logging.getLogger(userlName) # create default logging object\n userl.setLevel(logging.DEBUG)\n return userl, techl", "def log_info(self, logger, opt_loc=''):\n if len(np.unique(self._lr)) == 1:\n logger.info('Using %s %s optimizer with lr = %.5f.' % \\\n (self.name, opt_loc, self._lr[0]))\n else:\n logger.info('Using %s %s optimizer with:' % (self.name, opt_loc))\n for forward_opt in self._optimizer_list:\n assert len(forward_opt.param_groups) == 1\n lr = forward_opt.param_groups[0]['lr']\n shapes = str([list(pm.shape) for pm in \\\n forward_opt.param_groups[0]['params']])\n logger.info(' lr = %.3f for params with shape %s.' % \\\n (lr, shapes[1:-1]))", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def startLogging (self):\n self.isLogging = True\n self.startCallback ()", "def log_method(f):\n return log(f, ignore_first=True)", "def autolog(\n every_n_iter=1,\n log_models=True,\n disable=False,\n exclusive=False,\n disable_for_unsupported_versions=False,\n silent=False,\n): # pylint: disable=unused-argument\n # pylint: disable=E0611\n import tensorflow\n\n global _LOG_EVERY_N_STEPS\n _LOG_EVERY_N_STEPS = every_n_iter\n\n atexit.register(_flush_queue)\n\n if Version(tensorflow.__version__) < Version(\"1.12\"):\n warnings.warn(\"Could not log to MLflow. TensorFlow versions below 1.12 are not supported.\")\n return\n\n try:\n from tensorflow.python.summary.writer.event_file_writer import EventFileWriter\n from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2\n from tensorflow.python.saved_model import tag_constants\n from tensorflow.python.summary.writer.writer import FileWriter\n except ImportError:\n warnings.warn(\"Could not log to MLflow. 
TensorFlow versions below 1.12 are not supported.\")\n return\n\n def train(original, self, *args, **kwargs):\n active_run = mlflow.active_run()\n global _AUTOLOG_RUN_ID\n _AUTOLOG_RUN_ID = active_run.info.run_id\n\n # Checking step and max_step parameters for logging\n if len(args) >= 3:\n mlflow.log_param(\"steps\", args[2])\n if len(args) >= 4:\n mlflow.log_param(\"max_steps\", args[3])\n if \"steps\" in kwargs:\n mlflow.log_param(\"steps\", kwargs[\"steps\"])\n if \"max_steps\" in kwargs:\n mlflow.log_param(\"max_steps\", kwargs[\"max_steps\"])\n\n result = original(self, *args, **kwargs)\n\n # Flush the metrics queue after training completes\n _flush_queue()\n\n # Log Tensorboard event files as artifacts\n if os.path.exists(self.model_dir):\n for file in os.listdir(self.model_dir):\n if \"tfevents\" not in file:\n continue\n mlflow.log_artifact(\n local_path=os.path.join(self.model_dir, file),\n artifact_path=\"tensorboard_logs\",\n )\n return result\n\n def export_saved_model(original, self, *args, **kwargs):\n global _AUTOLOG_RUN_ID\n if _AUTOLOG_RUN_ID:\n _logger.info(\n \"Logging TensorFlow Estimator as MLflow Model to run with ID '%s'\", _AUTOLOG_RUN_ID\n )\n\n serialized = original(self, *args, **kwargs)\n\n def log_model_without_starting_new_run():\n \"\"\"\n Performs the exact same operations as `log_model` without starting a new run\n \"\"\"\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )\n\n log_model_without_starting_new_run()\n\n _AUTOLOG_RUN_ID = None\n\n return serialized\n\n @picklable_exception_safe_function\n def _get_early_stop_callback(callbacks):\n for callback in callbacks:\n if isinstance(callback, tensorflow.keras.callbacks.EarlyStopping):\n return callback\n return None\n\n def _log_early_stop_callback_params(callback):\n if callback:\n try:\n earlystopping_params = {\n \"monitor\": callback.monitor,\n \"min_delta\": callback.min_delta,\n \"patience\": callback.patience,\n \"baseline\": callback.baseline,\n \"restore_best_weights\": callback.restore_best_weights,\n }\n mlflow.log_params(earlystopping_params)\n except Exception: # pylint: disable=W0703\n return\n\n def _get_early_stop_callback_attrs(callback):\n try:\n return callback.stopped_epoch, callback.restore_best_weights, callback.patience\n except Exception: # pylint: disable=W0703\n return None\n\n def _log_early_stop_callback_metrics(callback, history, metrics_logger):\n if callback is None or not callback.model.stop_training:\n return\n\n callback_attrs = _get_early_stop_callback_attrs(callback)\n if callback_attrs is None:\n return\n\n stopped_epoch, restore_best_weights, _ = callback_attrs\n metrics_logger.record_metrics({\"stopped_epoch\": stopped_epoch})\n\n if not restore_best_weights or callback.best_weights is None:\n return\n\n monitored_metric = history.history.get(callback.monitor)\n if not monitored_metric:\n return\n\n initial_epoch = history.epoch[0]\n # If `monitored_metric` contains multiple best values (e.g. [0.1, 0.1, 0.2] where 0.1 is\n # the minimum loss), the epoch corresponding to the first occurrence of the best value is\n # the best epoch. In keras > 2.6.0, the best epoch can be obtained via the `best_epoch`\n # attribute of an `EarlyStopping` instance: https://github.com/keras-team/keras/pull/15197\n restored_epoch = initial_epoch + monitored_metric.index(callback.best)\n metrics_logger.record_metrics({\"restored_epoch\": restored_epoch})\n restored_index = history.epoch.index(restored_epoch)\n restored_metrics = {\n key: metrics[restored_index] for key, metrics in history.history.items()\n }\n # Checking that a metric history exists\n metric_key = next(iter(history.history), None)\n if metric_key is not None:\n metrics_logger.record_metrics(restored_metrics, stopped_epoch + 1)\n\n class FitPatch(PatchFunction):\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"x\", \"y\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n early_stop_callback = None\n\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 6:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[5])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[5] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n early_stop_callback = _get_early_stop_callback(callbacks)\n _log_early_stop_callback_params(early_stop_callback)\n\n history = original(inst, *args, **kwargs)\n\n _log_early_stop_callback_metrics(\n callback=early_stop_callback,\n history=history,\n metrics_logger=metrics_logger,\n )\n\n _flush_queue()\n mlflow.log_artifacts(\n local_dir=self.log_dir.location,\n artifact_path=\"tensorboard_logs\",\n )\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return history\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n class FitGeneratorPatch(PatchFunction):\n \"\"\"\n NOTE: `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n To avoid unintentional creation of nested MLflow runs caused by a patched\n `fit_generator()` method calling a patched `fit()` method, we only patch\n `fit_generator()` in TF < 2.1.0.\n \"\"\"\n\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"generator\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n\n run_id = mlflow.active_run().info.run_id\n\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 5:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[4])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[4] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n result = original(inst, *args, **kwargs)\n\n _flush_queue()\n mlflow.log_artifacts(local_dir=self.log_dir.location, artifact_path=\"tensorboard_logs\")\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return result\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n def add_event(original, self, event):\n _log_event(event)\n return original(self, event)\n\n def add_summary(original, self, *args, **kwargs):\n result = original(self, *args, **kwargs)\n _flush_queue()\n return result\n\n managed = [\n (tensorflow.estimator.Estimator, \"train\", train),\n (tensorflow.keras.Model, \"fit\", FitPatch),\n ]\n\n if Version(tensorflow.__version__) < Version(\"2.1.0\"):\n # `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n # To avoid unintentional creation of nested MLflow runs caused by a patched\n # `fit_generator()` method calling a patched `fit()` method, we only patch\n # `fit_generator()` in TF < 2.1.0\n managed.append((tensorflow.keras.Model, \"fit_generator\", FitGeneratorPatch))\n\n non_managed = [\n (EventFileWriter, \"add_event\", add_event),\n (EventFileWriterV2, \"add_event\", add_event),\n (FileWriter, \"add_summary\", add_summary),\n (tensorflow.estimator.Estimator, \"export_saved_model\", export_saved_model),\n (tensorflow.estimator.Estimator, \"export_savedmodel\", export_saved_model),\n ]\n\n # Add compat.v1 Estimator patching for versions of tensfor that are 2.0+.\n if Version(tensorflow.__version__) >= Version(\"2.0.0\"):\n old_estimator_class = tensorflow.compat.v1.estimator.Estimator\n v1_train = (old_estimator_class, \"train\", train)\n v1_export_saved_model = (old_estimator_class, \"export_saved_model\", export_saved_model)\n v1_export_savedmodel = (old_estimator_class, \"export_savedmodel\", export_saved_model)\n\n managed.append(v1_train)\n non_managed.append(v1_export_saved_model)\n non_managed.append(v1_export_savedmodel)\n\n for p in managed:\n safe_patch(FLAVOR_NAME, *p, manage_run=True)\n\n for p in non_managed:\n safe_patch(FLAVOR_NAME, *p)", "def getLogs():", "def getLogs():" ]
[ "0.7626581", "0.63579595", "0.62751955", "0.61822915", "0.6168312", "0.6166976", "0.6134176", "0.61169195", "0.6112573", "0.6073991", "0.6049023", "0.6018304", "0.6004898", "0.5980599", "0.59766626", "0.59521484", "0.59347326", "0.5919445", "0.5915726", "0.5904169", "0.58677363", "0.5866601", "0.58537304", "0.58315337", "0.58186036", "0.5811278", "0.58111167", "0.5797985", "0.57946587", "0.57910496", "0.578814", "0.5781855", "0.57727456", "0.5771322", "0.5761015", "0.575687", "0.575439", "0.57530826", "0.57530826", "0.57511544", "0.5737979", "0.57042515", "0.5695287", "0.568264", "0.56700987", "0.5666324", "0.5662188", "0.56615174", "0.56606287", "0.5653453", "0.56380975", "0.5636896", "0.5636366", "0.56352216", "0.56229365", "0.5620284", "0.5618068", "0.5605239", "0.5603587", "0.56003034", "0.55935997", "0.55931705", "0.5592463", "0.55891776", "0.5584128", "0.55837995", "0.55787885", "0.5574597", "0.5568998", "0.5568307", "0.5568276", "0.55630505", "0.5552686", "0.5547523", "0.5539411", "0.5532428", "0.5531185", "0.55306524", "0.55222803", "0.5521975", "0.5521387", "0.5512651", "0.54952914", "0.5481073", "0.54783994", "0.5478172", "0.54678524", "0.54658383", "0.5465457", "0.5457917", "0.5457059", "0.5447718", "0.54459494", "0.5444267", "0.54404336", "0.54386896", "0.54375744", "0.5436428", "0.54362166", "0.54362166" ]
0.5844291
23
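
The negatives above cluster around Python logging setup. For reference, a minimal sketch of the pattern most of them share (logger creation plus a file handler); the names and paths here are illustrative, not taken from any row:

import logging

def make_example_logger(name="example", logfile="example.log"):
    # Assumed names; mirrors the getLogger + handler pattern seen in the negatives.
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(logfile)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    return logger
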
Sends a message to the TCP server
def send(self, msg):
    if self.verbose:
        print('<- out ' + msg)
    self._socket.send_string(msg)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, msg):\n self.__sock.send(msg)", "def send(self, message):\n self.sock.send(message)", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def write(self, msg):\n self.sock.send(msg.encode())", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send_message():\n try:\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect((SERVER_IP, SERVER_PORT))\n print('[+] ' + SERVER_IP + ' connected!')\n position = MESSAGE.encode('utf-8')\n sock.send(bytes(position))\n sock.close()\n print('[+] Transfer completed!')\n except Exception as e:\n print('[-]', e)", "def send_message(self, message:str):\r\n msg_send = message.encode()\r\n self.server_connection.send(msg_send)", "def send(self, msg):\n if self.sock is not None:\n try:\n send_msg(self.sock, msg)\n except socket.error, msg:\n self.sock = None\n print 'Send failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]", "async def send_msg(self, message: str) -> None:\n await self.socket.sendall(message.encode())", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "def send(self, msg):\n # keep track of the total sent\n # so we can make sure the whole message is sent\n msg = (msg+'\\n').encode('utf-8')\n totalsent = 0\n while totalsent < len(msg):\n sent = self.sock.send(msg[totalsent:])\n # it is bad if we still have things to send\n # but do not send anything\n if sent == 0:\n raise RuntimeError(\"connection broken\")\n totalsent += sent", "def send_tcp(host, port, message):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n\n if isinstance(message, str):\n message = message.encode('utf-8')\n\n sock.send(message)\n sock.close()", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send_message(self, data):\n self.transport.write(data)", "def send_message_tcp(address, port, message):\n socket_connection = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) # tcp\n socket_connection.connect((address, port))\n socket_connection.recv(2048)\n socket_connection.send(message)\n response = socket_connection.recv(2048)\n socket_connection.close()\n return response", "def send(self, msg):\r\n if isinstance(msg, str):\r\n msg = msg.encode()\r\n logger.debug('Sending message: %s ...', repr(msg))\r\n self._socket.sendall(msg)", "def send_message(self, msg):\n if msg is None:\n raise ValueError('message cannot be None!')\n\n if not isinstance(msg, message.Message):\n raise ValueError('message must be a type of Message')\n\n message_json = json.dumps(msg.__dict__)\n message_length = len(message_json)\n message_length_binary = struct.pack('>I', message_length)\n\n logging.info(\"Send: {0}\".format(message_json))\n\n self.sck.send(message_length_binary)\n self.sck.send(message_json)", "def send_message(self, data):\n header, data = format_msg(data)\n 
self.server_socket.sendto(header, self.client_address)\n self.server_socket.sendto(data, self.client_address)", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send_msg(sock, msg):\n msg += '\\0'\n data = msg.encode('utf-8')\n sock.sendall(data)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def send_protocol_message(self, msg):\n self.conn.send(msg + \"\\0\")", "def write(self, msg):\n cmd = self.__compose(msg)\n self.sock.send(cmd)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def send_message(self, message):\r\n if not self.is_connected():\r\n self.__root.after(self.WAIT_PERIOD, lambda: self.\r\n send_message(message))\r\n return\r\n self.__socket.send(str(message).encode())", "def sendMsg(self, msg):\n self.sockUDP.sendto(bytes(msg), self.serverAddress)\n logger.debug(\"sent: %r\", msg)", "def send_message(sock, message) -> None:\n print('[CLIENT LOG] sending message to server: {}'.format(str(message)))\n if type(message) == bytes:\n \n sock.sendall(message)\n else:\n sock.sendall(str.encode(str(message)))", "def send_message(self, message, socket):\n socket.send(bytes(message, 'UTF-8'))", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send(self, msg):\n body = json.dumps(msg)\n body = \"Content-Length: \" + str(len(body)) + \"\\r\\n\\r\\n\" + body\n body = bytes(body, \"ascii\")\n totalsent = 0\n while totalsent < len(body):\n sent = self.sock.send(body[totalsent:])\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n totalsent = totalsent + sent", "def send(self, data):\n self.sock.send(data)", "def send(self, data):\n self.sock.send(data)", "def __send_request(self, msg, sock):\n if type(msg) != bytes:\n response = bytes(f\"{msg}\", \"ascii\")\n print(f\"--> Sending: {msg}\")\n sock.sendall(response)", "def s_send(self, command_type, msg):\n # A 1 byte command_type character is put at the front of the message\n # as a communication convention\n try:\n self.client_socket.send((command_type + msg).encode())\n except:\n # If any error occurred, the connection might be lost\n self.__connection_lost()", "def send(self,msg):\n msg = str(msg)\n if len(msg) > self.BUFFER_SIZE:\n raise DaemonSocketError(\"Message given is larger than buffer size!\")\n \n try:\n self.socket.sendall(msg.encode(self.ENCODING))\n \n except AttributeError:\n raise TypeError(\"Parameter given is not a string.\")\n except Exception as e:\n raise DaemonSocketError(e)", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def send(self, msg):\n msg = stc.pack('>I', len(msg)) + msg\n self.sendall(msg)", "def send_data(self, msg):\n totalsent = 0\n # tt= struct.unpack('c'*len(msg), msg)\n # print(tt)\n while totalsent < len(msg):\n try:\n sent = self.sockfd.send(msg)\n except:\n print(f'{self.ip} socket failed')\n break\n if sent == 0:\n raise RuntimeError(\"Socket connection broken\")\n totalsent = totalsent + sent", "def send(self, msg):\n\n self.sock.sendto(msg, (self.UDP_IP, self.UDP_PORT))", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "def send(self, msg):\n try:\n self.socket.sendall(str(msg).encode(self.ENCODING))\n except AttributeError:\n raise 
TypeError(\"Parameter given is not a string.\")\n except:\n raise DaemonSocketError(\"There was and issue sending the message.\")", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send_msg(sock, msg):\n\n # Stores the length of message in big-endian order\n msg = struct.pack('>I', len(msg)) + msg\n\n # Writes all bytes to the stream\n sock.sendall(msg)", "def send_message(self, content: str):\n\n data = f\"{content}\\n\".encode() # encode the text in binary representation\n self.transport.write(data) # send to a server", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send_message(self, msg):\n if msg is not None:\n try:\n self.node.write(msg.encode(encoding='UTF-8'))\n time.sleep(self.delay)\n except serial.serialutil.SerialTimeoutException:\n self.handle_congestion()\n self.send_message(msg)\n except serial.SerialException:\n self.handle_disconnection()\n self.send_message(msg)\n except:\n print(\"\\n!!!Unexpected error occurred in send_message()!!!\\n\")\n finally:\n return False\n return True", "def send(self, message, header='message'):\n if not message: return\n self.socket.sendall((header+':'+message).encode())", "def send(self, msg):\n if self.isConnected():\n pmsg = pickle.dumps(msg)\n if COMPRESS:\n pmsg = zlib.compress(pmsg)\n buffer = QByteArray()\n stream = QDataStream(buffer, QIODevice.WriteOnly)\n stream.setVersion(QDataStream.Qt_5_3)\n stream.writeUInt32(len(pmsg))\n stream.writeRawData(pmsg)\n bytesWritten = self.tcpsocket.write(buffer)\n self.tcpsocket.flush()\n self.tcpsocket.waitForBytesWritten()\n # qApp.processEvents() # send data immediately and don't wait for next mainloop\n logging.debug(\"Bytes written: %i\", bytesWritten)\n if bytesWritten > 0:\n return True\n else:\n logging.debug(\"Message not send. 
Not connected\")\n return False", "def send_message(self,input_message):\n try: \n self.connection.send('\\r' + input_message + '\\r')\n\n except:\n sys.stderr.write('failed to send message to server \\n') \n return False\n\n return True", "def message_send(self, msg):\n\n if not self.switch_socket:\n # Sending a string indicates the message is ready to go\n raise Exception(\"no socket\")\n\n if msg.xid == None:\n msg.xid = util.gen_xid()\n\n outpkt = msg.pack()\n\n self.logger.debug(\"Msg out: version %d class %s len %d xid %d\",\n msg.version, type(msg).__name__, len(outpkt), msg.xid)\n\n with self.tx_lock:\n if self.switch_socket.sendall(outpkt) is not None:\n raise AssertionError(\"failed to send message to switch\")\n\n return 0 # for backwards compatibility", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "def send(self, message):\n pass", "def serverSend(self, content, binary=False):\n if self.state == self.STATE_CONNECTING:\n self.serverAccept()\n self.last_data = time.time()\n logger.debug(\"Sent WebSocket packet to client for %s\", self.reply_channel)\n if binary:\n self.sendMessage(content, binary)\n else:\n self.sendMessage(content.encode(\"utf8\"), binary)", "def send_request(self, message):\n try:\n self.transport.write(message.encode())\n _LOGGER.debug('ROTEL Data sent: {!r}'.format(message))\n except:\n _LOGGER.debug('ROTEL : transport not ready !')", "def send_msg(msg, socket_out):\n\n if msg != '':\n # send msg\n try:\n socket_out.sendall(msg.encode())\n except:\n pass", "def write(self):\r\n assert self.status == SEND_ANSWER\r\n sent = self.socket.send(self.message)\r\n if sent == len(self.message):\r\n self.status = WAIT_LEN\r\n self.message = ''\r\n self.len = 0\r\n else:\r\n self.message = self.message[sent:]", "async def send_msg(self, msg):\n try:\n logging.info(\"Sending: %s\", msg)\n self.writer.write(msg.encode())\n await self.writer.drain()\n\n except Exception as e:\n logging.error(\"Command could not be encoded; %s\", e)", "def send_message(self, msg: dict):\n txrx_debug('{} sending {} msg to {}'.format(msg['src'], msg['type'], msg['dst']))\n self.sock.send(dumps(msg).encode('utf-8'))", "async def send(self, message):", "def send_message(self,data):\n num_bytes = len(data)\n message = WriteMessage()\n message.write_uint32(num_bytes)\n message.data.extend(data)\n self.socket.sendall(message.data)", "def send_message(self, message):\n msg_bytes = (\n f'{self.username}{self.delimiter}{message}'\n ).encode('utf-8')\n self.socket.writeDatagram(\n qtc.QByteArray(msg_bytes),\n qtn.QHostAddress.Broadcast,\n self.port\n )", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def send(self,data):\n\t\ttry:\n\t\t\tif len(self.protocol) > 1:\n\t\t\t\tfor protocol in reversed(self.protocol[1:]):\n\t\t\t\t\tdata = protocol.encode(data)\n\t\t\tself.protocol[0].sendTcpSocket(data, self.socket)\n\t\texcept protocols.ProtocolIsNotRespectedError as e:\n\t\t\traise CouldNotSendRequestError(\"\", e)\n\t\texcept Exception as e:\n\t\t\traise SocketError(\"Client socket died\", e)", "def send(event=None): # event is passed by binders.\n print(\"socket\")\n 
print(client_socket)\n msg = my_msg.get()\n my_msg.set(\"\") # Clears input field.\n try:\n client_socket.send(bytes(msg, \"utf8\"))\n except BrokenPipeError:\n error_msg = \"Unable to send\"\n msg_list.insert(tkinter.END, error_msg)\n \n if msg == \"{quit}\":\n client_socket.close()\n top.quit()", "def sendCommand(ser, msg):\n ser.write(\"%s\\r\\n\" % (msg))\n return", "def sendMessage(self, message, host, port):\n try:\n self._socket.settimeout(5)\n self._socket.connect((host, port))\n self._socket.send(message)\n data = self._socket.recv(16)\n if data == \"ACK\":\n return 1\n else:\n return 0\n except socket.error:\n return 0", "def send(serial_port, message):\n full_message = ''.join((message, \"\\n\"))\n\n (debug and\n print(\"server:\" + full_message + \":\") )\n\n reencoded = bytes(full_message, encoding='ascii')\n serial_port.write(reencoded)", "def Send(self, payload):\n self._sock.send(payload)", "def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def _send(self, message: str) -> None:\n logger.info(\"Send: {}\".format(message['type']))\n logger.debug(\"Send: {}\".format(message))\n\n message_b = (json.dumps(message) + '\\r\\n').encode()\n self.transport.write(message_b)", "def send(self, data):\n self.socket.sendall(data)", "def send(event=None): # event is passed by binders.\r\n msg = my_msg.get()\r\n my_msg.set(\"\") # Clears input field.\r\n client_socket.send(bytes(msg, \"utf8\"))\r\n if msg == \"{quit}\":\r\n client_socket.close()\r\n top.quit()", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_message(self, client, message):\n self.stdout.write(message)\n client.send(f'HTTP/1.1 200 OK\\r\\n\\r\\n{message}'.encode(\"utf-8\"))\n client.close()", "def client(ip, port, message): \n # Conectado con el servidor\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n try:\n sock.sendall(bytes(message, 'utf-8'))\n response = sock.recv(BUF_SIZE)\n print (\"Recibido por el cliente: %s\" %response)\n finally:\n sock.close()", "def send(send_msg, sock):\r\n send_msg = send_msg.encode()\r\n sock.send(send_msg)\r\n msg = sock.recv(1024)\r\n msg = msg.decode()\r\n return msg", "def send_net_message_client(message, client_addr):\n serverSocket.sendto(message, client_addr)", "def send(self, msg: str):\n message = msg.encode(HttpClient.FORMAT)\n self.client.send(message)\n print(\"[MESSAGE] message sent:\", msg)", "def send(self, command):\n if not self.debug:\n self.socket.send(command)\n logging.debug(\"SEND %s\" % command)\n else:\n logging.info(\"SEND %s\" % command)", "def send(self, data):\n if self.print_send:\n dumpdata.dumpdata(' > Send: ', '{:02x}', data)\n try:\n self.socket.send(data)\n except ConnectionAbortedError as err:\n raise Closed(err)", "def send(event=None): #event is passed by binders.\n try:\n msg = my_msg.get()\n my_msg.set(\" \") #Clears input field.\n client_socket.send(bytes(msg, \"utf8\"))\n \n except:\n \n HOST = '10.0.0.8'\n PORT = 8081\n ADDR = (HOST, PORT)\n \n s = socket(AF_INET, SOCK_STREAM)\n client_socket.bind(ADDR)\n s.connect((HOST, PORT))\n s.send(msg)\n \n if msg == \"{quit}\":\n client_socket.close()\n top.destroy()", "def just_send(self, client_socket, msg):\n msg = msg.encode('utf-8')\n message_header = f\"{len(msg):<{HEADER_LENGTH}}\".encode('utf-8')\n client_socket.send(message_header + msg)\n return", "def send_TCP_message(client, action, message, no_thread=False):\n\n def func(client, action, message):\n try:\n l.info(\n \"Sending server request with 
action: {} and message: {}\".format(\n action, message\n )\n )\n response = client.send_request(str(action), message)\n except Exception as err:\n l.info(\"Server Error {}\".format(err))\n return False\n if response:\n l.info(\"Server responded with {}\".format(response))\n return response\n else:\n l.info(\"Server seems to be offline.\".format(response))\n return False\n\n if client and not no_thread:\n x = Thread(target=func, args=(client, action, message))\n x.run()\n elif client and no_thread:\n return func(client, action, message)\n else:\n l.warning(\"No client defined for sending TCP packages. No message dispatched!\")", "def send_as_server(self, command, msg):\n self._write(f':{self.server.name} {command} {msg}')", "def _send_msg(self, msg):\n self._kernel.comm.send(msg)", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def send(self, data):\n if self._sock is None:\n raise ConnectionError(\"Error: a connection was not initiated, use 'connect' first.\")\n self._sock.send(data)", "def client(ip, port, message):\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n try:\n sock.sendall(bytes(message, 'ascii'))\n response = str(sock.recv(BUF_SIZE), 'ascii')\n print(\"Client received: {}\".format(response))\n finally:\n sock.close()", "def send(self, msg):\n raise NotImplementedError(\"DataStream does not implement send.\")" ]
[ "0.857833", "0.8029346", "0.7997576", "0.7927077", "0.7885072", "0.7848192", "0.7783163", "0.7779231", "0.77461135", "0.77231276", "0.7711107", "0.7656483", "0.7619312", "0.7602865", "0.75940025", "0.75879824", "0.75759625", "0.7557794", "0.7549099", "0.7539888", "0.7537408", "0.7514428", "0.7512818", "0.7443725", "0.7440493", "0.7440307", "0.74215096", "0.7402791", "0.739231", "0.7391191", "0.73746526", "0.7356845", "0.73515373", "0.73439854", "0.73349464", "0.72957903", "0.7286904", "0.7286904", "0.72667253", "0.72655505", "0.7260457", "0.7260381", "0.72573584", "0.725548", "0.7217643", "0.72082883", "0.7196631", "0.71789867", "0.71692353", "0.71498907", "0.71495", "0.7149477", "0.7149477", "0.7149477", "0.7148989", "0.7136868", "0.70911884", "0.70839846", "0.7073439", "0.7062346", "0.70547837", "0.7053026", "0.70475894", "0.70378256", "0.70359635", "0.70196056", "0.7018233", "0.701508", "0.70139396", "0.6974268", "0.6963352", "0.6961151", "0.6961151", "0.696089", "0.69599015", "0.6959502", "0.69552064", "0.69463253", "0.69324136", "0.69265115", "0.69173294", "0.69159144", "0.68926185", "0.688929", "0.6874542", "0.6867431", "0.68662995", "0.68641394", "0.68613297", "0.6854372", "0.68536234", "0.6853521", "0.6845913", "0.6845404", "0.68411165", "0.68400717", "0.6830187", "0.6827774", "0.6824151", "0.682101" ]
0.7415012
27
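
Note that the positive document for this query sends via send_string, which appears to be a PyZMQ socket call rather than a raw TCP write. For comparison, a minimal plain-TCP sketch using only the standard library; host, port, and message are assumed placeholders:

import socket

def send_tcp(host, port, msg):
    # Opens a TCP connection, sends one UTF-8 encoded message, then closes.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
        sock.sendall(msg.encode("utf-8"))
    finally:
        sock.close()
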
Checks the ZeroMQ socket for data
def recv(self):
    return self._socket.recv()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_data_is_empty(self, app, data_queues):\n res = self._call(app, {\"invalid\": 0}, ip=self.test_ip, status=200)\n self.check_response(data_queues, res, \"ok\")\n self.check_queue(data_queues, 0)", "def _chk_empty(self, queue, receiver):\n try:\n msg = receiver.fetch(timeout=0)\n self.assert_(False, \"Queue \\\"%s\\\" not empty: found message: %s\" % (queue, msg))\n except Empty:\n pass", "def DataAvailable(self) -> bool:", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def check_for_incoming_info(self):\n\n if self.test_message_response:\n self.parse_incoming_message(self.test_message_response)\n return True\n\n POLL_ONLY_TIMEOUT_VALUE = 0\n got_at_least_one = False\n while (True):\n readables, writables, errors = select.select([self.socket_datastream], [], [], POLL_ONLY_TIMEOUT_VALUE)\n if not self.socket_datastream in readables:\n return got_at_least_one\n got_at_least_one = True\n data, remote_ip_port = self.socket_datastream.recvfrom(MAX_EXPECTED_MSG_SIZE)\n if remote_ip_port != self.ip_port_arduino_datastream:\n errorhandler.loginfo(\"Msg from unexpected source {}\".format(remote_ip_port))\n else:\n errorhandler.logdebug(\"msg received:{}\".format(data.hex()))\n self.parse_incoming_message(data)", "def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def has_data(self):\n return len(self.data) > 0", "def check_missing_data():\n from mspray.apps.main.utils import sync_missing_sprays\n\n sync_missing_sprays(FORM_ID, print)", "def check_ack_queue(self):\r\n try:\r\n while True:\r\n ack = self.ack_queue.get_nowait()\r\n self.handle_ack(ack)\r\n except queue.Empty:\r\n pass", "def empty(self):\n return 0 >= len(self.__data)", "def is_empty(self):\r\n return self.buff==[]", "def check_updates(self):\n self.db.__connect__()\n self.ZULIP_SERVICE_TOPIC_MAP = self.db.get_topics()\n self.db.__disconnect__()", "def verify_queue_empty(self):\n self.assert_sample_queue_size(DataParticleType.VELOCITY_PARTICLE, 0)\n self.assert_sample_queue_size(DataParticleType.TIME_PARTICLE, 0)", "def test_filter_messages_empty_data(self):\n pass", "def check_connection(self):\n pass", "def is_empty(self):\n return len(self.data) == 0", "def new_messages(self):\n ready, _, _ = select([self.socket], [], [], 0.0)\n return self.socket in ready", "def is_alive(self):\n try:\n stdout, stderr = self.run(0, \"rabbitmqctl\", \"list_queues\")\n for lines in stdout, stderr:\n for line in lines:\n if \"no_exists\" in line:\n return False\n return True\n except Exception:\n return False", "def empty(self) -> bool: \n if(self.queue is not None and len(self.queue) > 0):\n print(\"len > 0\" )\n return False\n else:\n print(\"len = 0\" )\n return True", "def check_message(self, msg):\n pass", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def is_empty(self):\n if len(self.messages) < 1:\n return True\n else:\n return False", "def _check_queue(self):\n self._process_incoming_queue_messages()\n self._root.after(200, self._check_queue)", "def queue_empty(self, queue_name):\n return self.queue_message_count(queue_name) == 0", "def __check_array__(self):\n if self.data_array is None:\n mess = 'No data array is present, please load before 
attempting to create XYZ array.'\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message(mess)\n else:\n print(mess)\n return False\n else:\n return True", "def test_empty_messages(self):\n self.failureResultOf(self.producer.send_messages(\"topic\"), ValueError)\n self.failureResultOf(self.producer.send_messages(\"topic\", msgs=[]), ValueError)", "def receiveData(self):\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.PAIR)\n #self.socket.connect(\"tcp://localhost:5556\")\n self.socket.connect(\"ipc:///tmp/mysocket\")\n print(\"Communication via IPC - Mac and Linux Only\")\n #Envia uma mensagem pedindo para comecar\n startstr = \"START\"\n self.socket.send(startstr.encode('utf-8'))\n time.sleep(1)\n #Recebe os dados\n while True:\n contents = self.socket.recv()\n self.commsqueue.put(contents)", "def test_initially_empty(self):\n self.assertEqual(0, len(self.redis.redis[LIST1]))", "def __check_ping(self):\n if not self.communications.ping():\n self.communications.ping(True)", "def is_empty(self):\n return self.count.addCallback(lambda x: x == 0)", "def has_data(self):\n if len(self.channels) > 0:\n return True\n return False", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def test_empty_body(self, app, data_queues, redis):\n res = self._call(app, \"\", ip=self.test_ip, method=\"post\", status=200)\n self.check_response(data_queues, res, \"ok\")\n self.check_queue(data_queues, 0)\n if self.apikey_metrics:\n # ensure that a apiuser hyperloglog entry was added for today\n today = util.utcnow().date().strftime(\"%Y-%m-%d\")\n expected = \"apiuser:%s:test:%s\" % (self.metric_type, today)\n assert [key.decode(\"ascii\") for key in redis.keys(\"apiuser:*\")] == [\n expected\n ]\n # check that the ttl was set\n ttl = redis.ttl(expected)\n assert 7 * 24 * 3600 < ttl <= 8 * 24 * 3600", "def check():", "def check(self):\n if len(self._logs) == 0:\n return False\n else:\n return True", "def test_zero_msgs(self):\n msg = []\n self.dead_letter.handle_messages(msg)", "def data_available(self):\n return (self.status & 0x08) != 0", "def is_empty (self):\n return len(self.network) == 0", "def is_empty(self):\n\n # If the queue is an empty list, self._data would return False\n # So if the queue is empty we want to return true\n # modify with not self._data\n return not self._data", "def _monitor_for_zero_connected_peers(self):\n if len(self.Peers) == 0 and len(self.connection_queue) == 0:\n if self.peer_zero_count > 2:\n logger.debug(\"Peer count 0 exceeded max retries threshold, restarting...\")\n self.Restart()\n else:\n logger.debug(\n f\"Peer count is 0, allow for retries or queued connections to be established {self.peer_zero_count}\")\n self.peer_zero_count += 1", "def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)", "def data_checker(xml):\n if not xml or 'response code=\"102\"' in xml:\n LOGGER.debug(\"The service 'oclc' is temporarily down!\")\n return False\n return True", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or 
os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def check(self, connection):\n return True", "def is_empty(self):\n return self.channels is None or self.timestamp is None", "def data_ready(self):\n data_ready = len(self.barcode) > 0\n data_ready &= self.price > 0\n data_ready &= len(self.description) > 0\n return data_ready", "def data_ready(self) -> bool:\n data_ready = ctypes.c_uint8()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_GetMeasurementDataReady(self.dev, byref(data_ready)))\n return data_ready.value != 0", "def func_data(self, data, get_recv, get_data):\n if get_data:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n else:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[1].lower().rstrip()\n if checking == 'data':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(checking)\n self.func_sender(message)\n return True", "def isEmpty(self):\n return self.qSize == 0", "def is_empty(self):\n return self.queue == []", "def check_for_new_data(self):\n return", "def __len__(self):\n return len(self.__broker)", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def is_msg_inited(self):\n pass", "def test_ipcrm_queues_not_isntalled(): # pragma: windows\n IPCComm.ipcrm_queues()", "def am_i_offline(self):\n # -num_of_last_check_rounds_consider won't raise IndexError when len(self.data) is smaller\n logger.debug(\"called am_i_offline and data is: %s\" % self.data)\n if not self.data:\n return False\n for dict_check_results in self.data[-self.num_of_last_check_rounds_consider:]:\n for res in list(dict_check_results.values()):\n if res == 0:\n return False\n else:\n return True", "def is_setup(self):\n return self._market_data_sock_info.ready.is_set() and \\\n self._orders_sock_info.ready.is_set()", "def is_empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n return len(self.queue) == 0", "def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )", "def check_availability(self):\n\t\tif not self.connection_is_usable:\n\t\t\treturn False\n\t\twith self.client_lock:\n\t\t\tif self.stream is None:\n\t\t\t\treturn False\n\t\t\tif self.last_ping is None or self.last_ping.age() >= self.ping_max_age:\n\t\t\t\tself.last_ping = SendPing(self, self.ping_timeout)\n\t\t\tlast_ping = self.last_ping\n\t\treturn last_ping.answered(self.ping_timeout)", "def isMissingDataAlarm(self):\n\n if(self.missingData and len(self.subjects) != 0):\n for animal in self.subjects:\n message = \"Missing data for \" + animal + \".\"\n if(self.log):\n logging.info(message)\n self.sendToAllSubscribers(message, \"Alert: Missing data\")", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def test_read_no_data(self):\n payload = payloads.MACRequestPayload()\n args = (self.encoding_no_data,)\n self.assertRaisesRegex(\n exceptions.InvalidKmipEncoding,\n \"expected mac request data not found\",\n payload.read,\n *args\n )", "def func_empty_check(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if str(check) == '':\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n return True", "def 
test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n assert(len(res) > 0)\n self.assert_equal_data_dict(res)", "def Check_Communications(self):\n self.serial_status = False\n try:\n self.serial_status = self.ser.isOpen()\n except Exception as e:\n print \"No communication to stage serial bus. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.serial_status = False\n self.encoder_status = False\n try:\n self.encoder_status = True\n for i in range(3):\n value = self.fd_channel[i].read(3)+b'\\x00' \n # read the 24 bit register (3 bytes) and add a fourth byte \n # to make it an integer.\n signed_value = struct.unpack(\"=I\", value)[0] \n if signed_value < 0 or signed_value > 2**24:\n self.encoder_status = False\n break\n except Exception as e:\n print \"No communication to optical encoders. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.encoder_status = False\n self.comm_status = self.serial_status and self.encoder_status\n return", "def check_if_full(self):\n pass", "def handle_empty(self):\n if self.no_data: return\n for subtree in (self.confdata, self.rpcs, self.notifications):\n if len(subtree.children) == 0:\n SchemaNode(\"empty\", subtree)", "def is_full(self):\n return self.redis.count() >= PROXY_NUMBER_MAX", "def isEmpty(self):\n return 0 == len(self.queue)", "def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException", "def test_udp_no_records():\n assert dnsck_query(\"8.8.8.8\", \"test.google.com\", \"A\", 1) == 0", "def broker_null(self, data):\n\n print(\"Heartbeat\")\n #TODO: Reset heartbeat timer or something like that", "def check_message(m, n_frames, tx_id, data):\n assert len(m.frames) == n_frames\n assert m.tx_id == tx_id\n assert m.data == bytearray(data)", "def is_buffer_empty(self): \n if self.buffer.shape == (0, 5):\n return True\n else:\n return False", "def is_empty(self):\n return len(self.__queue) > 0", "def checkQueue( self ):\n if self.queue:\n yield self.writeToSerial( self.queue.pop( 0 ) )\n else:\n self.free = True", "def test_snmpprocss_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"SNMP Request failed. 
See log for details\"\n }\n self.test_snmpprocess.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)", "def is_empty(self):\n if len(self._data) == 0:\n return True\n return False", "def is_empty(self):\n return len(self.the_queue) == 0", "def is_empty_input_socket(self, socket_name):\n self.check_input_socketname(socket_name)\n return False if self.__inputs[socket_name]['data'] else True", "def has_message_available(self):\n return not self.feedback_log.empty()", "def check_messages(self, broker, queue, exp_msg_list, transactional=False, empty=False, ack=True, browse=False,\n emtpy_flag=False):\n if emtpy_flag:\n num_msgs = 0\n else:\n num_msgs = len(exp_msg_list)\n ssn = broker.connect().session(transactional=transactional)\n rcvr = ssn.receiver(self.rcv_addr(queue, browse=browse), capacity=num_msgs)\n if num_msgs > 0:\n try:\n recieved_msg_list = [rcvr.fetch(timeout=0) for i in range(num_msgs)]\n except Empty:\n self.assert_(False, \"Queue \\\"%s\\\" is empty, unable to retrieve expected message %d.\" % (queue, i))\n for i in range(0, len(recieved_msg_list)):\n self.assertEqual(recieved_msg_list[i].content, exp_msg_list[i].content)\n self.assertEqual(recieved_msg_list[i].correlation_id, exp_msg_list[i].correlation_id)\n if empty:\n self._chk_empty(queue, rcvr)\n if ack:\n ssn.acknowledge()\n if transactional:\n ssn.commit()\n ssn.connection.close()\n else:\n if transactional:\n ssn.commit()\n return ssn", "def _check_comm_reply(self):\n if len(self._pending_comms) == 0:\n return\n for comm in self._pending_comms.values():\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(1, self._check_comm_reply)", "def empty(self): \n return self.qsize() == 0", "def check_connection(self):\n return False", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.MessageDataset())) == 138737", "def test_carol_sent(self):\n messages = list(self.carol_storage.sent)\n self.assertEqual(2, len(messages))\n self.assertIn(self.read_message, messages)\n self.assertIn(self.archived_message, messages)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def _check_data_size(self):\n if len(self.list_data) < self.n_cluster:\n self.n_cluster = len(self.list_data)", "def isEmpty(self):\n\t\tself.logger.debug('Check if queue job is empty')\n\t\tisEmpty = self.queue.empty()\n\t\tself.logger.debug('Queue job is empty ?: %s'%(isEmpty))\n\t\treturn isEmpty" ]
[ "0.6370523", "0.6197868", "0.61302525", "0.6033723", "0.5991278", "0.59654814", "0.58306265", "0.58306265", "0.57430387", "0.57194114", "0.567935", "0.56438", "0.5607466", "0.5605082", "0.559692", "0.5590848", "0.5575481", "0.5566702", "0.55629605", "0.5525354", "0.5514238", "0.55129576", "0.5502078", "0.54989135", "0.54981095", "0.5493669", "0.548696", "0.5454129", "0.5452089", "0.544392", "0.5432076", "0.5424204", "0.5421626", "0.54197437", "0.54197437", "0.54197437", "0.54197437", "0.54197437", "0.54197437", "0.54197437", "0.5416874", "0.5414725", "0.54135776", "0.54107493", "0.54099333", "0.54042464", "0.53974724", "0.53827244", "0.5381853", "0.5381596", "0.5377583", "0.5376567", "0.53696936", "0.5368451", "0.53682435", "0.5366829", "0.53664", "0.53657806", "0.5358902", "0.5354536", "0.5352783", "0.53514844", "0.535092", "0.5350015", "0.5348108", "0.53463054", "0.53463054", "0.53404063", "0.53316456", "0.5329029", "0.53209496", "0.5319301", "0.53174305", "0.5317159", "0.53139997", "0.5310092", "0.5305446", "0.5303453", "0.5297827", "0.5296408", "0.52938205", "0.5293528", "0.5285595", "0.5273414", "0.5269791", "0.5264589", "0.5262067", "0.5252346", "0.5246549", "0.5241573", "0.5240707", "0.52390903", "0.5233006", "0.52283883", "0.52282", "0.521515", "0.5212584", "0.5212314", "0.5209351", "0.52057266", "0.52028567" ]
0.0
-1
Initializes and returns an LSL outlet
def initializeOutlet(interface):
    info = StreamInfo('OpenBCI_EEG', 'EEG', 4, 256, 'float32', 'openbci12345')
    outlet = StreamOutlet(info)
    return outlet
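For context, a minimal sketch of how such an outlet is typically driven (this assumes StreamInfo/StreamOutlet come from the pylsl package, which the snippet does not show; the random samples and the 256-sample loop bound are placeholders):

import time
import random
from pylsl import StreamInfo, StreamOutlet

info = StreamInfo('OpenBCI_EEG', 'EEG', 4, 256, 'float32', 'openbci12345')
outlet = StreamOutlet(info)

for _ in range(256):
    sample = [random.random() for _ in range(4)]  # one placeholder value per channel
    outlet.push_sample(sample)                    # broadcast the sample on the LSL network
    time.sleep(1.0 / 256)                         # pace roughly at the nominal 256 Hz rate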
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)", "def _add_lamp_outlets(self):\r\n lst = self.model.get_all_lamp_outlets()\r\n\r\n for itm in lst:\r\n self._add_lamp_outlet(itm)", "def __init__(self) -> None:\n ptr = lib.wlr_output_layout_create()\n self._ptr = ffi.gc(ptr, lib.wlr_output_layout_destroy)\n\n self.add_event = Signal(ptr=ffi.addressof(ptr.events.add))\n self.change_event = Signal(ptr=ffi.addressof(ptr.events.change))\n self.destroy_event = Signal(ptr=ffi.addressof(ptr.events.destroy))", "def connect_ls_to_lr(ls, lr, rp, rp_ip, rp_mac, db):\n ovn_nbctl(\"-- --id=@lrp create Logical_Router_port name=%s network=%s \"\n \"mac=%s -- add Logical_Router %s ports @lrp -- lsp-add %s \"\n \"rp-%s\" % (rp, rp_ip, rp_mac, lr, ls, rp), db)\n ovn_nbctl(\"set Logical-Switch-Port rp-%s type=router \"\n \"options:router-port=%s addresses=%s\" % (rp, rp, rp_mac), db)", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.foundation", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.pythia", "def __init__ (self, scHandle):\n Greenlet.__init__(self)\n\n self.scHandle = scHandle", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet", "def __init__(self, *args):\n this = _ida_hexrays.new_lvar_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, mpls_ttl=None):\n super().__init__()\n self.mpls_ttl = mpls_ttl", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.phe", "def __init__(self, als, cfg=None):\n\n self.als = als\n if cfg is None:\n cfg = AlsDEMCfg()\n self.cfg = cfg", "def __init__(self, name: str, hw_device: KnauerDAD):\n super().__init__(name, hw_device)\n self.lamp = name\n self.add_api_route(\"/lamp_status\", self.get_lamp, methods=[\"GET\"])\n self.add_api_route(\"/status\", self.get_status, methods=[\"GET\"])", "def __init__(self, window):\n self._ptr = lib.SDL_GL_CreateContext(window._ptr)", "def __init__(self, ns=None):\n this = _libSALOME_LifeCycleCORBA.new_SALOME_LifeCycleCORBA(ns)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args, **kwargs):\n super(LinlLis, self).__init__(\n ('linl', Bits(maxlen=4)),\n ('lis', Bits(maxlen=4)),\n *args, **kwargs\n )", "def __init__(self, label, LEDStrips, colors):\n\n self._label = label\n self._LEDStrips = LEDStrips\n self._colors = colors", "def setupLL_Native(self):\n self.LLN_Selector = slicer.qMRMLNodeComboBox()\n self.LLN_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLN_Selector.noneEnabled = True\n self.LLN_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLN_Selector.addEnabled = 0\n self.LLN_SelectorLabel = qt.QLabel('Native Look Locker')\n self.LLN_Selector.setToolTip(\"Select the pre contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLN_SelectorLabel, self.LLN_Selector)", "def __init__(self):\n self.raw_wires = 
PyWires.WireNetwork();\n self.__initialize_wires();", "def setupLL_Enhanced(self):\n self.LLE_Selector = slicer.qMRMLNodeComboBox()\n self.LLE_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLE_Selector.noneEnabled = True\n self.LLE_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLE_Selector.addEnabled = 0\n self.LLE_SelectorLabel = qt.QLabel('Enhanced Look Locker')\n self.LLE_Selector.setToolTip(\"Select the post contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLE_SelectorLabel, self.LLE_Selector)", "def IBOutlet(name=None):\n if name is None:\n return ivar(isOutlet=1)\n else:\n return ivar(name, isOutlet=1)", "def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())", "def __init__(self):\n import visa\n\n rm = visa.ResourceManager()\n target = 'Agilent Technologies,8163B,MY48208514,V5.25(72637)'\n\n for dev in rm.list_resources():\n try:\n inst = rm.open_resource(dev)\n name = inst.query('*IDN?') # Agilent Technologies,8163B,MY48208514,V5.25(72637)\n if target in name:\n # TODO: check that the slot contains the correct module\n self._inst = inst\n except:\n continue\n\n if self._inst is None:\n raise RuntimeError(\"Target resource {} cannot be found in the VISA resource manager\".format(target))\n print(\"Connected to \" + self.id())", "def __init__(self, config, ll=None, osimmodel=None, landmarks=None):\n self.config = config\n self.ll = ll\n self.trcdata = landmarks\n self.gias_osimmodel = None\n if osimmodel is not None:\n self.set_osim_model(osimmodel)\n self._unit_scaling = dim_unit_scaling(\n self.config['in_unit'], self.config['out_unit']\n )", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n config = kwargs.get(\"config\", kwargs)\n self.connection_type = config.get(\"connection_type\", None)\n self.connection = connection_decider.connection(device=self,\n conn_type=self.connection_type,\n **kwargs)\n self.connection.connect()\n self.consoles = [self]\n super(PrplMeshStation, self).__init__(*args, **kwargs)\n self.iface_dut = self.iface_wifi = self.kwargs.get(\n 'iface', 'wlan0')\n self.driver_name = config.get(\"driver\", \"nl80211,wext\")\n self.mac = self.get_mac()\n\n # kill all wpa_supplicant relevant to active interface\n self.wifi_disconnect()\n # Turn on and off wlan iface just in case\n self.disable_and_enable_wifi()", "def __init__( self, owner, shoulderindex, wristindex, ctrlindex=0 ):\n\t\tself.shoulder = ServoJoint( owner, shoulderindex, ctrlindex ) \n\t\tself.wrist = ServoJoint( owner, wristindex, ctrlindex )", "def __init__(self):\n self.new_dll = DLinkedList()", "def __init__(self, layout=None):\n self.presentation_ended = False\n self.presentation = Presentation()\n self.layout = layout\n self.master_connection = None\n self.source = ''\n self.beacon = Beacon()\n self.beacon.start_beaconing()", "def __init__(self, dli_endpoint = None, verbosity = dlsApi.DLS_VERB_WARN, **kwd):\n\n # Let the parent set the server (if possible) and verbosity\n dlsApi.DlsApi.__init__(self, dli_endpoint, verbosity)\n \n # If the server is not there yet, try from DLI_ENDPOINT\n if(not self.server):\n self.server = environ.get(\"DLI_ENDPOINT\")\n\n # If still not there, give up \n if(not self.server):\n raise SetupError(\"Could not set the DLS server to use\")\n \n # Extract the root directory\n dlsserver=self.server.split('/')[0]\n dlspath=self.server.replace(dlsserver,'')\n dlspath = dlspath.rstrip('/')\n\n # Set the server for LFC API use\n self.server=dlsserver\n\n if (not 
dlspath):\n raise SetupError(\"No LFC's root directory specified for DLS use\")\n\n # Set the root directory (might be empty, since DLI back-end may be other than LFC)\n self.root = dlspath\n\n # Create the binding \n try: \n if(self.verb >= DLS_VERB_HIGH):\n print \"--DliClient.init(%s)\" % self.server\n self.iface = dliClient.DliClient(self.server)\n except dliClient.SetupError, inst:\n raise SetupError(\"Error creating the binding with the DLI interface: \"+str(inst))", "def __init__(self):\n this = _sunpos.new_cLocation()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, sptr, aaidee):\n pos_vec = VECTOR()\n vel_vec = VECTOR()\n fwd_vec = VECTOR()\n up_vec = VECTOR()\n self._sysptr = sptr\n self._id = aaidee\n ckresult(\n _dll.FMOD_System_Get3DListenerAttributes(\n self._sysptr,\n aaidee,\n byref(pos_vec),\n byref(vel_vec),\n byref(fwd_vec),\n byref(up_vec),\n )\n )\n self._pos = pos_vec\n self._vel = vel_vec\n self._fwd = fwd_vec\n self._up = up_vec\n self._rolloff_callback = None", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def __init__(self):\n this = _libsbml.new_SBO()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.defaultTheme = \"DarkAmber\"\n self.version = 1.4\n self.versionName = \"class update\"\n self.title = \"Lms GUI default window\"\n self.layout = [[sg.Text(\"This is the base window class layout.\")]]\n self.elementJustification = 'c'\n self.location=(500, 300)\n self.running = True\n self.window = None\n self.event = \"\"\n self.values = []\n self.nextAction = None", "def __init__(self, logical_services_node=None):\n super(ServicesNodeIpSecConfig, self).__init__()\n self.log = logger.setup_logging(self.__class__.__name__)\n self.schema_class = 'ipsec_config_schema.IpSecConfigSchema'\n\n if logical_services_node is not None:\n self.set_connection(logical_services_node.get_connection())\n\n self.set_create_endpoint(\"/lservices-nodes/\" + logical_services_node.id + \"/service-bindings/ipsec/config\")\n self.id = None", "def __init__(self, config, loop):\n self.config = config\n self.loop = loop", "def __init__(self):\n this = _libsbml.new_ListWrapperSBMLNamespaces()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.__uplinker = None\n self.__downlinker = None", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def __init__(self, config, display, input_source):\n self.config = config\n self.display = display\n self.input_source = input_source\n self.state = State(config)\n for _ in range(0, self.config.orb_count):\n self.state.spawn_orb()", "def initialize_sido(\n blk, state_args=None, outlvl=idaeslog.NOTSET, solver=None, optarg=None\n):\n if optarg is None:\n optarg = {}\n\n # Set solver options\n init_log = idaeslog.getInitLogger(blk.name, outlvl, tag=\"unit\")\n solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag=\"unit\")\n\n solver_obj = get_solver(solver, optarg)\n\n # Get initial guesses for inlet if none provided\n if state_args is None:\n state_args = {}\n state_dict = blk.properties_in[\n blk.flowsheet().time.first()\n ].define_port_members()\n\n for k in 
state_dict.keys():\n if state_dict[k].is_indexed():\n state_args[k] = {}\n for m in state_dict[k].keys():\n state_args[k][m] = state_dict[k][m].value\n else:\n state_args[k] = state_dict[k].value\n\n # ---------------------------------------------------------------------\n # Initialize control volume block\n flags = blk.properties_in.initialize(\n outlvl=outlvl,\n optarg=optarg,\n solver=solver,\n state_args=state_args,\n hold_state=True,\n )\n blk.properties_treated.initialize(\n outlvl=outlvl,\n optarg=optarg,\n solver=solver,\n state_args=state_args,\n hold_state=False,\n )\n blk.properties_byproduct.initialize(\n outlvl=outlvl,\n optarg=optarg,\n solver=solver,\n state_args=state_args,\n hold_state=False,\n )\n\n init_log.info_high(\"Initialization Step 1 Complete.\")\n\n # ---------------------------------------------------------------------\n # Solve unit\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n results = solver_obj.solve(blk, tee=slc.tee)\n\n init_log.info_high(\"Initialization Step 2 {}.\".format(idaeslog.condition(results)))\n\n # ---------------------------------------------------------------------\n # Release Inlet state\n blk.properties_in.release_state(flags, outlvl)\n\n init_log.info(\"Initialization Complete: {}\".format(idaeslog.condition(results)))\n\n if not check_optimal_termination(results):\n raise InitializationError(\n f\"{blk.name} failed to initialize successfully. Please check \"\n f\"the output logs for more information.\"\n )", "def __init__(self, *args):\n this = _libsbml.new_SBMLUri(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, loc_connections, image_connections,\n props_connections, bbox,\n external_loc=None, parent=None, main_ref=None,\n functional_manager=None):\n self.loc_connections = loc_connections\n self.image_connections = image_connections\n self.props_connections = props_connections\n self.external_loc = external_loc\n TopLevelAuxiliaryWindow.__init__(self, window_name=self.tool_name,\n parent=parent, main_ref=main_ref)\n if functional_manager is not None:\n functional_manager.loc_signal=self.loc_changed\n functional_manager.image_signal=self.image_changed\n functional_manager.props_signal=self.image_props_changed\n self.func_man = functional_manager", "def __init__(self, L, T_range):\n self.L = L\n self.spins = np.ones((L, L, len(T_range)))\n self.InitializeSpins(T_range[0])", "def __init__(self, source=None):\n super().__init__(self.COMPONENT_NAME)\n self.__links = {}\n self.__source = source", "def init():\n return _libsbml.LayoutExtension_init()", "def __init__(self, *args):\n this = _libsbml.new_Compartment(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, hass, lj, i, name):\n self._hass = hass\n self._lj = lj\n self._index = i\n self._brightness = 0\n self._name = name\n\n lj.on_load_activated(i, self._on_load_changed)\n lj.on_load_deactivated(i, self._on_load_changed)", "def __init__(self, component):\r\n self.component = component", "def __init__(self, component):\r\n self.component = component", "def __init__(self):\n\n # labjack connection handle (default: None. 
If connected: labjack handler instance)\n self.connection_handle = None\n\n # labjack connection state (default: None, connection_error: False, connected: True)\n self.connection_state = False\n\n # try to connect\n self.connect()", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def do_component_init(self):\n logger.debug(\"RwdtstaskletPython: do_component_init function called\")\n component_handle = RwTaskletPlugin.ComponentHandle()\n return component_handle", "def __init__(self,\n env,\n initial_lid_pos,\n name,\n resource='objects/box_with_lid.xml'):\n self._env = env\n self._initial_lid_pos = initial_lid_pos\n self._name = name\n self._resource = resource", "def __init__(self):\n\n # initialize window\n self.win = graphics.GraphWin(\"Lunar Lander Game\", 300, 500)\n \n # transform coordinates\n self.win.setCoords(0, -10, 300, 600)\n\n self.surface_polygon = self.create_surface()\n self.surface_polygon.draw(self.win)\n self.background()\n \n\n self.lander_polygon = None\n # Draws two different thrust buttons\n self.b1 = Button(graphics.Point(100, 560), 80, 20, 'Thrust')\n self.b2 = Button(graphics.Point(200, 560), 80, 20, 'No Thrust')\n self.b1.draw(self.win)\n self.b2.draw(self.win)\n \n # Draws text values for altitude, velocity, and fuel\n self.alt_num = graphics.Text(graphics.Point(50, 400), 'Altitude: ')\n self.vel_num = graphics.Text(graphics.Point(50, 450), 'Velocity: ')\n self.fuel_num = graphics.Text(graphics.Point(50, 500), 'Fuel: ')\n self.alt_num.draw(self.win)\n self.vel_num.draw(self.win)\n self.fuel_num.draw(self.win)", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self, helper=None):\n self.helper = helper\n self.sp_manager = SPManager(helper.handle, helper.service_profile)", "def __init__(self):\r\n self.label = \"OVL Tools\"\r\n self.alias = \"\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [OVLtoFeature, BatchOVLtoFeature]", "def __init__(self):\n self.label = \"Toolbox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Offset]", "def __init__(self):\n self.action_server = actionlib.SimpleActionServer(\"navigate_2D_action\",\n Navigate2DAction, self.navigate_cb)\n\n self.robot_point_sub = rospy.Subscriber(\"robot/point\", Point, self.update_robot_position)\n self.robot_current_point = None\n self.robot_goal_point = None\n self.distance_threshold = 0.35\n self.feedback_rate = rospy.Rate(1)", "def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self.T1_LLN_Node = None\n self.T1_LLE_Node = None\n self.ECVMapNode = None\n self.LLE_Node = None\n self.LLN_Node = None\n self.ArefNode = None\n self.T1_LLE_Name = 'T1 Enhanced'\n self.T1_LLN_Name = 'T1 Native'\n self.ResetSliceViews()\n self.LinkSlices()\n self.ColorBarEnabled()\n self.setupVolumeNodeViewLayout()\n self.Warning = True", "def __init__(self, hst, iface):\n self.host = hst\n self.iface = iface", "def __init__(self):\n # Lynx Dimensions in mm\n self.L1 = 76.2 # distance between joint 0 and joint 
1\n self.L2 = 146.05 # distance between joint 1 and joint 2\n self.L3 = 187.325 # distance between joint 2 and joint 3\n self.L4 = 34 # distance between joint 3 and joint 4\n self.L5 = 34 # distance between joint 4 and center of gripper\n\n # Joint limits\n self.lowerLim = np.array([-1.4, -1.2, -1.8, -1.9, -2.0, -15]).reshape((1, 6)) # Lower joint limits in radians (grip in mm (negative closes more firmly))\n self.upperLim = np.array([1.4, 1.4, 1.7, 1.7, 1.5, 30]).reshape((1, 6)) # Upper joint limits in radians (grip in mm)", "def __init__(self, portname, devicetype):\n if devicetype == DEVICE_DEBUG_BOARD:\n self.device = debugbox.DebugBox(portname)\n elif devicetype == DEVICE_SFP_BREAKOUT:\n self.device = sfpbreakout.SFP(portname)\n elif devicetype == DEVICE_DUMMY_BOARD:\n self.device = dummybox.DummyBox(portname)\n else:\n raise IOError(\"Invalid Device Type\")\n \n # Set up laser sections\n self.mirror1 = Testrig.LaserSection(self, 814, 10, 90, 728, 844, 860, 864)\n self.laser_phase = Testrig.LaserSection(self, 810, 11, 20, 726, 846, 858, 866)\n self.gain = Testrig.LaserSection(self, 818, 12, 180, 730, 848, None, None)\n# self.gain1.validator.setTop(150)\n self.mirror2 = Testrig.LaserSection(self, 826, 14, 90, 734, 850, 862, 864)\n# self.front.validator.setTop(60)\n self.soa1 = Testrig.LaserSection(self, 822, 15, 300, 736, 852, None, None)\n# self.soa1.validator.setTop(150)\n self.soa2 = Testrig.LaserSection(self, 830, 16, 100, 738, 854, None, None, self.to_display_current_section2, self.to_internal_current_section2)\n self.phase1 = Testrig.LaserSection(self, 834, 13, 100, 732, 856, None, None, self.to_display_current_section2, self.to_internal_current_section2)\n \n self.voltage_max = 2.5 # This is now a constant\n \n self.full_rig = True", "def __init__(self):\n this = _libsbml.new_SBMLWriter()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n # clears the console window\n if sys.platform in ('linux-i386','linux2'):\n os.system(\"clear\")\n elif sys.platform in ('win32','dos','ms-dos'):\n os.system(\"cls\")\n\n # print scripts info\n print self.WELCOME_MESSAGE\n\n # initialize all instance variables\n self.guiElements = {} # dictionary of gui elements (buttons, strings, sliders, ...)\n self.gui_events = [] # list of events\n self.gui_event_ids = {} # dictionary of event ids\n self.config = {} # configuration dictionary\n self.target = None # import or export\n self.callback = None # function to call when config gui is done\n self.texpathIndex = 0\n self.texpathCurrent = ''\n\n # reset GUI coordinates\n self.xPos = self.XORIGIN\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]\n\n # load configuration\n self.load()", "def init(self):\n\n # Configuration interface support comes with plasma\n self.setHasConfigurationInterface(False)\n\n # Aspect ratio defined in Plasma\n self.setAspectRatioMode(Plasma.IgnoreAspectRatio)\n\n # Theme is a const variable holds Applet Theme\n self.theme = Plasma.Svg(self)\n\n # It gets default plasma theme's background\n self.theme.setImagePath(\"widgets/background\")\n\n # Resize current theme as applet size\n self.theme.resize(self.size())\n\n self.mainWidget = None\n self.layout = None\n\n self.initPlasmoid()", "def __init__(self, *args):\n this = _libsbml.new_SBMLInitialAssignmentConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\n # For now, we'll connect to the target via the Apollo debug controller.\n # This should be replaced by a high-speed USB link soon; but 
for now\n # we'll use the slow debug connection.\n self._debugger = ApolloDebugger()\n self._serial = self._find_serial_connection()", "def __init__(self):\n super().__init__()\n self.port_end = PortTerminator()", "def __init__(self, host):\n self._io = RemoteIO(host)\n self._host = host\n\n self._left_wheel = Wheel(id='b', side='left', remote_io=self._io)\n self._right_wheel = Wheel(id='a', side='right', remote_io=self._io, inverse=True)\n\n self._cam = Camera(host)\n\n self._left_led = LED(side='left', remote_io=self._io)\n self._front_led = LED(side='center', remote_io=self._io)\n self._right_led = LED(side='right', remote_io=self._io)", "def __init__(\n self, config: gym_auv.Config, init_state: np.ndarray, width: float = 4\n ) -> None:\n\n self.config = config\n\n # Initializing private attributes\n self._width = config.vessel.vessel_width\n\n self._n_sectors = self.config.vessel.n_sectors\n self._n_sensors = self.config.vessel.n_sensors_per_sector * self._n_sectors\n self._d_sensor_angle = (\n 2 * np.pi / (self._n_sensors)\n ) # radians TODO: Move to sensor?\n self._sensor_angles = np.array(\n [-np.pi + (i + 1) * self._d_sensor_angle for i in range(self._n_sensors)]\n )\n\n self._sensor_internal_indeces = []\n self._sensor_interval = max(1, int(1 / self.config.simulation.sensor_frequency))\n self._observe_interval = max(\n 1, int(1 / self.config.simulation.observe_frequency)\n )\n self._virtual_environment = None\n self._use_feasibility_pooling = config.vessel.sensor_use_feasibility_pooling\n\n # Calculating sensor partitioning\n if self._use_feasibility_pooling:\n # Initialize sectors used for sensor dimensionality reduction\n self.lidar_preprocessor = LidarPreprocessor(\n self.config, self._d_sensor_angle\n )\n else:\n self.lidar_preprocessor = None\n\n # Calculating feasible closeness\n if self.config.vessel.sensor_log_transform:\n self._get_closeness = lambda x: 1 - np.clip(\n np.log(1 + x) / np.log(1 + self.config.vessel.sensor_range), 0, 1\n )\n else:\n self._get_closeness = lambda x: 1 - np.clip(\n x / self.config.vessel.sensor_range, 0, 1\n )\n\n # Initializing vessel to initial position\n self.reset(init_state)", "def __init__(self):\n _snap.TStdOut_swiginit(self, _snap.new_TStdOut())", "def __init__(self, universe=None, localname=None, local_id=None, \n container=None):\n if container:\n assert isinstance(container, (Universe, Area))\n if not universe:\n universe = container if isinstance(container, Universe) \\\n else container.universe\n SmartHomeItem.__init__(self, universe, localname, local_id, \n container)\n self._initialized = False # also written to by metaclass\n self._state_entities = set()\n self.universe._registerdevice(self)", "def __init__(self):\n #screen Settings\n self.screen_width = 1024\n self.screen_height = 768\n self.bg_color = (32, 32, 32)\n\n #rocket settings\n self.rocket_speed = 1\n\n #laser Settings\n self.laser_speed = 1.0\n self.laser_width = 3\n self.laser_height = 15\n self.laser_color = (0, 255, 255)\n self.lasers_allowed = 3", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def __init__(self,master,socket):\n \n self.current = \"\"\n self.roomList = {}\n self.socket = socket\n self.menu = tki.Menu(master,tearoff=0)\n self.menu.add_command(label = \"Join\", command = lambda: self.join(self.current))\n self.menu.add_command(label 
= \"Close Menu\", command = self.menu.unpost)", "def __init__(__self__, *,\n endpoint_port: pulumi.Input[int],\n listener_port: pulumi.Input[int]):\n pulumi.set(__self__, \"endpoint_port\", endpoint_port)\n pulumi.set(__self__, \"listener_port\", listener_port)", "def __init__(self,address = None):\n\t\t# I really should do some validation around here\n\t\n\t\tif address != None:\n\t\t\tself.connect(address)", "def init_lens(self):\n\n response = self.send_lens_cmd(['00'], fast_mode=False)\n response = self.send_lens_cmd(['0A', '00'], fast_mode=False)\n\n if response['MISO'][1] != 'AA':\n print(response['return_str'])\n raise RuntimeError('Lens initialisation failed')\n\n response = self.send_lens_cmd(['0A', '00'], fast_mode=True)\n\n cmd = ['80', '0A']\n for n in range(10):\n cmd.append('00')\n\n response = self.send_lens_cmd(cmd, fast_mode=True)\n\n self._min_FL = int('0x' + response['MISO'][4], 16)\n self._max_FL = int('0x' + response['MISO'][6], 16)\n\n if self.min_FL == self.max_FL:\n self.lens_desc = '{} mm prime lens'.format(self.min_FL)\n else:\n self.lens_desc = '{}-{} mm tele lens'.format(self.min_FL, self.max_FL)\n\n print('initialised {}'.format(self.lens_desc))", "def new_ll():\n from linked_list import Linked_List\n this_ll = Linked_List(PARAMS_SAMPLE_LIST)\n return this_ll", "def __init__(self):\n self.layer_scope = None\n self.out = None", "def __init__(self):\n super().__init__()\n self.texture = arcade.load_texture(\":resources:/images/enemies/slimeBlue.png\")\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def init( self ):\n\t\treturn self", "def __init__(self):\r\n Device.__init__(self)\r\n\r\n self.menu.addAction(\"Restart\", self.restart)\r\n self.menu.addAction(\"Stop\", self.terminate)", "def __init__(self):\n node = ListNode(0) # dummy\n self.head = node\n self.tail = node\n self.len = 0", "def __init__(self):\n\n self.__main_window = None\n self.__main_display_table = None\n self.remote_stop = False\n\n self.__start_time = None\n self.__broadcast_entry = None\n self.__broadcast_label = None\n self.__broadcast_button = None\n self.__active_lines_stringvar = None\n self.__active_buses_stringvar = None\n self.__number_of_people_stringvar = None\n self.__session_time_stringvar = None\n self.__free_text_stringvars_dict = dict() #holds all the stringvars needed for the bus messages\n self.__font_name = \"Bahnschrift SemiBold SemiConden\"\n #coordinates for groups of icons on the screen\n self.__main_buttons_coords = {\"x\": 458, \"y\": 647}\n self.__statistics_coords = {\"x\": 348, \"y\": 690}\n self.__admin_controls_coords = {\"x\": 459, \"y\": 777}\n self.__broadcast_coords = {\"x\": 22, \"y\": 356}\n self.__messages_coords = {\"x\": 58, \"y\": 56}\n self.__table_coords = {\"x\": 448, \"y\": 16, \"width\": 620, \"height\": 566}", "def __init__(self):\n self.label = \"Data Assistant\"\n self.alias = \"dla\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Append, Stage, NewFile, Preview, Replace]", "def __init__(self):\n self.output = []\n self.ctl = clingo.Control() # Control object for the grounding/solving process", "def __init__(self, port):\n pjGateWay.__init__(self, port)\n self.ga = self.way.entry_point", "def __init__(self, *args):\n this = _libsbml.new_InitialAssignment(*args)\n try: self.this.append(this)\n except: self.this = this", "def 
__init__(self, *args):\n this = _libsbml.new_LocalParameter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__():\n self.placa = placa", "def __init__(self):\n self.model = Model()\n self.view = View()\n\n self.server = Server(msg_handler=self.msg_handler,\n err_handler=self.err_handler,\n conn_handler=self.conn_handler,\n quit_handler=self.quit_handler)\n self.server.start()\n\n self.view.frame.onclose(self.server.close)", "def __init__(self, output_shape, name):\n super(LSTMBase, self).__init__(name)\n\n with self._BlockScope():\n self._output_shape = [None] + list(output_shape)\n self._hidden = None\n self._cell = None", "def __init__(\n self,\n lab: Lab,\n lid: str,\n iface_a: Interface,\n iface_b: Interface,\n label: Optional[str] = None,\n ) -> None:\n self._id = lid\n self._interface_a = iface_a\n self._interface_b = iface_b\n self._label = label\n self.lab = lab\n self._session: httpx.Client = lab.session\n self._state: Optional[str] = None\n self._stale = False\n self.statistics = {\n \"readbytes\": 0,\n \"readpackets\": 0,\n \"writebytes\": 0,\n \"writepackets\": 0,\n }", "def __init__(self, name='demo'):\n init()\n joystick.init()\n for i in range(joystick.get_count()):\n joystick.Joystick(i).init()\n\n State.game = util.load_cfg(name)\n State.clock = Clock(10, State.game['frame_rate'])\n State.window = display.set_mode(State.game['screen_size'])\n\n self._last_joystick_action = None\n self.create_screens()" ]
[ "0.5946556", "0.5688111", "0.55620337", "0.55175424", "0.53838944", "0.5277127", "0.52567214", "0.52559364", "0.5208461", "0.5208461", "0.51927763", "0.51246226", "0.50718874", "0.5064975", "0.5049424", "0.5043597", "0.50424546", "0.50129515", "0.5001687", "0.49760997", "0.49708754", "0.49548873", "0.49347582", "0.4921544", "0.4921092", "0.49127913", "0.48999384", "0.4887002", "0.48794746", "0.48781332", "0.4872631", "0.48682398", "0.48618823", "0.4855835", "0.485049", "0.48429015", "0.483812", "0.48299578", "0.48273122", "0.48264602", "0.4825413", "0.4823307", "0.48185137", "0.48004586", "0.48000652", "0.47977194", "0.4797199", "0.47946215", "0.4789649", "0.4779821", "0.47609058", "0.47609058", "0.4731964", "0.47318566", "0.47241485", "0.4720133", "0.4717972", "0.47157672", "0.47157672", "0.4712701", "0.4712567", "0.4696351", "0.46914762", "0.4689413", "0.46849853", "0.46791828", "0.46779716", "0.46737906", "0.46726105", "0.46708456", "0.4670538", "0.4669967", "0.46679696", "0.4667726", "0.46670458", "0.46637672", "0.46560782", "0.46549445", "0.46515617", "0.46460718", "0.46448612", "0.4644807", "0.46288875", "0.46275762", "0.46270648", "0.4617352", "0.46160778", "0.46153685", "0.46119568", "0.46051115", "0.4598521", "0.45971617", "0.45796013", "0.45778236", "0.45729458", "0.45727503", "0.45726135", "0.4565994", "0.45619616", "0.45605457" ]
0.58739966
1
This function builds a dictionary of managers to manager nodes.
def buildHierarchy(self, test_input):
    for entry in test_input:
        if entry['manager'] not in self.relations:
            self.relations[entry['manager']] = Node(entry['manager'], entry['name'])
        else:
            self.relations[entry['manager']].employees.append(entry['name'])
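For reference, a self-contained sketch consistent with the code above; the Node class is hypothetical (its fields are assumptions, since the original class definition is not shown):

# Hypothetical Node: one manager plus the list of their direct reports.
class Node:
    def __init__(self, manager, employee):
        self.manager = manager
        self.employees = [employee]

relations = {}
test_input = [
    {'manager': 'alice', 'name': 'bob'},
    {'manager': 'alice', 'name': 'carol'},
]
for entry in test_input:
    if entry['manager'] not in relations:
        relations[entry['manager']] = Node(entry['manager'], entry['name'])
    else:
        relations[entry['manager']].employees.append(entry['name'])

print(relations['alice'].employees)  # ['bob', 'carol']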
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_managers():\n return {'managers': get_users('managers')}", "def map_uses(self):\n out = {}\n for node in self.nodes.values():\n baddies = set()#track incomplete connections and relegate to attributes\n for rtype, dest in node.outgoing_relations:\n try:\n self.nodes[dest].add_predecessor(rtype, node.name)\n out.setdefault(rtype, set()).add((node.name, dest))\n except KeyError:\n baddies.add((rtype, dest))\n for rtype, dest in baddies:\n node.remove_relation(rtype, dest)\n node.add_attribute(rtype, dest)\n\n atc = node.attributes.copy()\n #check if any attributes have corresponding nodes\n for atype, attrib in atc:\n if attrib in self.nodes:\n node.remove_attribute(atype, attrib)\n node.add_relation(atype, attrib)\n self.nodes[attrib].add_predecessor(atype, node.name)\n out.setdefault(atype, set()).add((node.name, attrib))\n \n return out", "def build_graph(nodes):\n\n job_instances_map = {}\n\n # first create node structure\n nodes_map = {}\n root_nodes = []\n for node in nodes:\n new_node = JobGraphNode(node, job_instances_map)\n nodes_map[node.id] = new_node\n # check if it is root node\n try:\n node.relationships.next()\n except StopIteration:\n root_nodes.append(new_node)\n\n # then set relationships\n for _, child in nodes_map.iteritems():\n for relationship in child.cfy_node.relationships:\n parent = nodes_map[relationship.target_node.id]\n parent.add_child(child)\n child.add_parent(parent)\n\n return root_nodes, job_instances_map", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def __build_nodes(self):\n self.components = {}\n\n for node in self.get_nodes():\n # Create the node\n assert node not in self.components, \"Node %s already exists\" % node.name\n self.components[node] = Node(name=node,\n node=self.graph.nodes[node],\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n # Add the new components\n self.components.update(self.components[node].get_components())", "def _get_namespace_manager_dict(keys):\n return {k: getattr(namespace_manager, k)() for k in keys}", "def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes", "def create_manager(self, name, pos, dept):\n self.manager[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'senior': [],\n 'junior': [],\n 'trainee': []\n }\n )", "def generate_tree(self):\n tree = {\n \"name\": 'Agents',\n \"children\": [\n {\n \"name\": \"Transports\",\n \"count\": \"{}\".format(len(self.transport_agents)),\n \"children\": [\n {\n \"name\": \" {}\".format(i.name.split(\"@\")[0]),\n \"status\": i.status,\n \"icon\": \"fa-taxi\"\n } for i in self.transport_agents.values()\n ]\n },\n 
{\n \"name\": \"Customers\",\n \"count\": \"{}\".format(len(self.customer_agents)),\n \"children\": [\n {\n \"name\": \" {}\".format(i.name.split(\"@\")[0]),\n \"status\": i.status,\n \"icon\": \"fa-user\"\n } for i in self.customer_agents.values()\n ]\n },\n\n ]\n }\n return tree", "def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info", "def construct_trees(self, nodes):\n trees = {}\n for root in tqdm.tqdm(nodes):\n # note that nodes is an uniquely ordered set\n # tree = {0: {0 : [nb_1, nb_2, ..., nb_k], nb_1: [0, ...]}, 1 : {1: [nb_1,...], nb_1 : [..]},...}\n trees[root] = {}\n trees[root][root] = [root]\n # print('test...', trees[root][root])\n used_nodes = set()\n # queue has the form as following queue([root] for root in tqdm.tqdm(nodes)\n # with each node, we construct the tree rooted at that node, denoted as queue(['root'])\n queue = collections.deque([root]) # deque([0]) -> deque([0,1])\n while len(queue) > 0:\n cur_node = queue.popleft()\n used_nodes.add(cur_node)\n for sub_node in self.graph[cur_node]:\n # sub_node is not ordered\n if sub_node not in used_nodes:\n trees[root][cur_node].append(sub_node)\n trees[root][sub_node] = [cur_node]\n queue.append(sub_node)\n used_nodes.add(sub_node)\n return trees", "def nodes_names_map(self):\n return {nd.name: nd for nd in self.nodes}", "def build_dependency_tree(nodes):\n\n return { k: deps_for(nodes, k) for k in list(nodes.keys()) }", "def node_encoder_dict():\n\n from .encoders import geometric, mixed\n from mlreco.models.layers.gnn.encoders.cnn import ClustCNNMinkNodeEncoder\n # from mlreco.models.scn.gnn.encoders.cnn import ClustCNNNodeEncoder\n\n encoders = {\n \"geo\" : geometric.ClustGeoNodeEncoder,\n \"mix_debug\" : mixed.ClustMixNodeEncoder,\n \"cnn\": ClustCNNMinkNodeEncoder\n }\n\n return encoders", "def make_unpack_map(node):\n return dict(zip(node.names, node.iternodes()))", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def __init__(self, nodes):\n self.parents = {}\n self.ranks = {}\n\n for node in nodes:\n self.parents[node] = node\n self.ranks[node] = 0", "def node_mapping(self):\n ...", "def _make_ordered_node_map(\n pipeline: p_pb2.Pipeline\n) -> 'collections.OrderedDict[str, p_pb2.PipelineNode]':\n node_map = collections.OrderedDict()\n for pipeline_or_node in pipeline.nodes:\n node_id = pipeline_or_node.pipeline_node.node_info.id\n node_map[node_id] = pipeline_or_node.pipeline_node\n return node_map", "def get_dict(MGs, dist_AMT, Tcnt, lcnt, GA_param):\n for i in range(0, Tcnt):\n for j in range(0, GA_param-1):\n if j==0:\n MGs['tree-'+str(i+1)]['dict_'+str(j+1)] = map_nodes(MGs['tree-'+str(i+1)]['dist_'+str(j+1)], dist_AMT, lcnt, 'none')\n else:\n MGs['tree-'+str(i+1)]['dict_'+str(j+1)] = map_nodes(MGs['tree-'+str(i+1)]['dist_'+str(j+1)], MGs['tree-'+str(i+1)]['dist_'+str(j)], lcnt, 'none')\n return MGs", "def get_management_software_property_name_map(cls):\n return cls._NODE_PROPERTY_NAMES", "def network_nodes_species(self):\n G, mapping = 
self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def node_catalogue():\r\n\r\n classes = node_subclasses(Node)\r\n\r\n catalogue = {}\r\n\r\n for node_class in classes:\r\n try:\r\n name = node_class.identifier()\r\n except AttributeError:\r\n # If node does not provide identifier, we consider it to be\r\n # private or abstract class\r\n continue\r\n\r\n # Get copy of node info\r\n info = dict(get_node_info(node_class))\r\n info[\"name\"] = name\r\n info[\"factory\"] = node_class\r\n\r\n # Get node type based on superclass, if not provided\r\n\r\n if \"type\" not in info:\r\n if issubclass(node_class, SourceNode):\r\n info[\"type\"] = \"source\"\r\n elif not issubclass(node_class, SourceNode) \\\r\n and not issubclass(node_class, TargetNode):\r\n info[\"type\"] = \"processing\"\r\n elif issubclass(node_class, TargetNode):\r\n info[\"type\"] = \"target\"\r\n else:\r\n info[\"type\"] = \"unknown\"\r\n\r\n catalogue[name] = info\r\n\r\n return catalogue", "def add_managers(self, managers: Union[List[Any], Tuple[Any]]):\n for m in self._flatten(managers):\n self.apply_configuration_defaults(m)\n self._managers.add(m)", "def manager_factory(manager_type):\n return {\n 'web': WebManager,\n 'github': GitHubManager,\n 'apkdownloadmirror': ApkDownloadMirrorManager,\n 'apkplz': ApkPlzManager,\n }[manager_type]", "def configure_node_managers(config):\n host_names = get_compute_node_host_names(config)\n sysctl_settings = unflatten_dict_keys(config, 'sysctl_(.*)')\n sys_settings = unflatten_dict_keys(config, '(/sys/.*)')\n pjobs = [delayed(configure_node_manager)(host_name, sysctl_settings, sys_settings, config.get('transparent_hugepage_enabled')) for host_name in host_names]\n Parallel(n_jobs=len(pjobs))(pjobs)", "def _build_generic_nodes_dict(self, graph, padding='A'):\n nodes_dict = {}\n for node, data in graph.nodes_iter(data=True):\n nodes_dict.update({node: padding})\n return nodes_dict", "def get_graph_dictionary(self):\n nodes = {}\n n = 0\n for node in self.__nodes:\n nodes[n] = tuple(node.get_data())\n n += 1\n\n edges = set()\n for edge in self.__edges:\n new_edge = (edge.get_node_a().get_id(), edge.get_node_b().get_id())\n edges.add(new_edge)\n\n graph_dict = {}\n graph_dict[\"nodes\"] = nodes\n graph_dict[\"edges\"] = edges\n\n return graph_dict", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n 
self.successors[nd_out.name].append(nd_in)", "def _build_nodes_dict(self, graph):\n nodes_dict = {}\n for node, data in graph.nodes_iter(data=True):\n nodes_dict.update({node: data['label']})\n return nodes_dict", "def create_nodes(self):\n # Create a special dictionary that will raise an error if a key is\n # updated. This avoids the\n nodes = NodeDict()\n\n return create_solph_nodes_from_data(self.input_data, nodes)", "def nodes_inventory(self):\n\n if not self.nodes:\n self.get_nodes()\n\n _nodes_inventory = {}\n _server = urlparse(self.connector.base_url).hostname\n\n for _n in self.nodes:\n\n _nodes_inventory.update(\n {\n _n.name: {\n \"server\": _server,\n \"name\": _n.name,\n \"console_port\": _n.console,\n \"console_type\": _n.console_type,\n \"type\": _n.node_type,\n \"template\": _n.template,\n }\n }\n )\n\n return _nodes_inventory", "def get_worker_nodes(self):\n worker_nodes_count = input('enter number of worker nodes\\n'\n 'default [2]: ')\n default = 2\n worker_nodes_count = set_values(worker_nodes_count, default, check='integer')\n worker_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['worker_nodes'] = []\n for num in range(worker_nodes_count):\n worker_values = []\n default = 'worker-{}'.format(num)\n worker_name = input('enter the worker {} node name\\n'\n 'default [{}]: '.format(num, default))\n worker_name = set_values(worker_name, default)\n worker_ip = get_ip(node_name=worker_name, ip_type='os')\n worker_mac = get_network_device_mac(node_name=worker_name, ip_type='idrac')\n worker_values.append(worker_name)\n worker_values.append(worker_ip)\n worker_values.append(worker_mac)\n worker_node_dict_pairs = dict(zip(worker_keys, worker_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(worker_name, worker_name,\n worker_ip, worker_mac)) \n self.inventory_dict['csah']['vars']['worker_nodes'].append(worker_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_workers'] = worker_nodes_count", "def nodeNames(self):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getNodeNames()\n else:\n return [self.node, \"system-manager\"]", "def _create_ports_per_numa(node, interfaces):\n\n # Make a list of ports by numa node\n ports_per_numa = {}\n for item in interfaces.items():\n i = item[1]\n if i[\"numa_node\"] not in ports_per_numa:\n ports_per_numa[i[\"numa_node\"]] = {\"interfaces\": []}\n ports_per_numa[i[\"numa_node\"]][\"interfaces\"].append(i)\n else:\n ports_per_numa[i[\"numa_node\"]][\"interfaces\"].append(i)\n node[\"cpu\"][\"ports_per_numa\"] = ports_per_numa\n\n return ports_per_numa", "def preprocess(nodes_info_str):\n\tnodeslist = re.findall(r\"node\\d+.*\\n(?: \\w+\\s*=\\s*.*\\n)*\", nodes_info_str)\n\tnodesdict = {}\n\tfor node in nodeslist:\n\t\tname = re.match(r\"(node\\d+).*\", node).group(1)\n\t\ttotal = int(re.search(r\"np = (\\d+)\", node).group(1))\n\t\tsearch = re.search(r\" jobs = (.*)\", node)\n\t\tif not search: remain = total; users = set()\n\t\telse: \n\t\t\tremain = total - (search.group(1).count(\",\") + 1)\n\t\t\tusers = set(re.findall(r\"\\d+/(\\d+).*?[,$]\", search.group(0)))\n\t\tif re.search(r\"state\\s*=\\s*down\", node): remain = \"--\"\n\t\tnodesdict[name] = {\"total\": total, \"remain\": remain, \"users\": users}\n\treturn nodesdict", "def get_leaf_swithnames(self):\n switchnames = {}\n with open('setup1.json', 'r') as f:\n json_text = f.read()\n setup = json.loads(json_text)\n for value in setup['topology']['nodes']:\n if 
value['role'] == \"LEAF\":\n switchnames.update({value['mgmt_ip_address'] : value['name']})\n return switchnames", "def build_dict_from(cls, trees):\n allnames = set()\n alllabels = set()\n suffixes = [cls.suffix_sep + cls.leaf_suffix, cls.suffix_sep + cls.last_suffix,\n \"{}{}{}{}\".format(cls.suffix_sep, cls.leaf_suffix, cls.suffix_sep, cls.last_suffix)]\n for tree in trees:\n treenames, treelabels = tree._all_names_and_labels()\n allnames.update(treenames)\n alllabels.update(treelabels)\n if len(alllabels) == 1 and alllabels == {None} or len(alllabels) == 0:\n alltokens = allnames\n else:\n alltokens = set(sum([[token+\"/\"+label for label in alllabels] for token in allnames], []))\n\n indic = OrderedDict([(\"<MASK>\", 0), (\"<START>\", 1), (\"<STOP>\", 2),\n (cls.root_symbol, 3), (cls.none_symbol, 4)])\n outdic = OrderedDict()\n outdic.update(indic)\n offset = len(indic)\n alltokens = [\"<RARE>\"] + sorted(list(alltokens))\n numtokens = len(alltokens)\n newidx = 0\n for token in alltokens:\n indic[token] = newidx + offset\n newidx += 1\n numtokens = len(alltokens)\n newidx = 0\n for token in alltokens:\n outdic[token] = newidx + offset\n for i, suffix in enumerate(suffixes):\n outdic[token +\n suffix] = newidx + offset + (i + 1) * numtokens\n newidx += 1\n return indic, outdic", "def get_nodes_info(graph):\n nodes = collections.defaultdict(lambda: {\n 'name': None,\n 'sg': None,\n 'up': set(),\n 'down': set(),\n })\n for node in graph.get_nodes():\n name = node.get_name()\n nodes[name]['name'] = name.strip('\"')\n for sg in graph.get_subgraphs():\n sgname = sg.get_name().strip('\"')\n if sgname.startswith('cluster_'):\n sgname = sgname[8:]\n sgname = sgname.replace('__', '.').replace('_dash_', '-')\n for node in sg.get_nodes():\n name = node.get_name()\n nodes[name]['name'] = name.strip('\"')\n nodes[name]['sg'] = sgname\n return dict(nodes)", "def get_master_nodes(self):\n default = 3\n master_nodes_count = input('enter number of master nodes\\n'\n 'default [3]: ')\n master_nodes_count = set_values(master_nodes_count, default, check='integer')\n master_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['master_nodes'] = []\n for num in range(master_nodes_count):\n master_values = []\n default = 'etcd-{}'.format(num)\n master_name = input('enter the master {} node name \\n'\n 'default [{}]: '.format(num, default))\n master_name = set_values(master_name, default)\n master_ip = get_ip(node_name=master_name, ip_type='os')\n master_mac = get_network_device_mac(node_name=master_name, ip_type='idrac')\n master_values.append(master_name)\n master_values.append(master_ip)\n master_values.append(master_mac)\n master_node_dict_pairs = dict(zip(master_keys, master_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(master_name, master_name,\n master_ip, master_mac)) \n self.inventory_dict['csah']['vars']['master_nodes'].append(master_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_masters'] = master_nodes_count", "def allocate(self, models, edges, nrnodes, totalActivities):\n # Return something of the form: {0: 0, 1: 0, 2: 0, 3: 1}\n # To allocate model_ids 0, 1 and 2 to node 0 and model_id 3 to node 1\n return {0: 0, 1: 0, 2: 0, 3: 1}", "def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped = {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = 
ammentity\n\n return node_dict_mapped", "def register_manager(self, update, context):\r\n new_manager_chat_id = update['message']['chat']['id']\r\n new_manager_name = update['message']['chat']['first_name']\r\n\r\n with open('managers.json') as obj:\r\n managers = json.load(obj)\r\n\r\n managers[new_manager_name] = new_manager_chat_id\r\n\r\n with open('managers.json', 'w') as obj:\r\n json.dump(managers, obj)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'{new_manager_name} - {new_manager_chat_id}')", "def find_cluster_managers_ips(resource, event, trigger, **kwargs):\n\n nsxlib = utils.get_connected_nsxlib()\n manager_ips = nsxlib.cluster_nodes.get_managers_ips()\n LOG.info(\"NSX Cluster has %s manager nodes:\", len(manager_ips))\n for ip in manager_ips:\n LOG.info(\"%s\", str(ip))", "def _map_metanodes_to_metaedges(self):\n # look through all metanodes\n for kind in self.metanode_to_ids.keys():\n\n # Nodes are abbreviated in Metaedge Abbrevioations\n n_abbrev = self.metagraph.kind_to_abbrev[kind]\n\n metanode_edges = dict()\n # Find out which Metanodes this Metaedge participates in\n for e in self.metaedges:\n parsed = gt.parse_edge_abbrev(e)\n if n_abbrev in parsed:\n # want to know if our metaedge of interest is the start or end of this metanode...\n metanode_edges[e] = {'start': parsed[0] == n_abbrev}\n self.metanode_to_edges[kind] = metanode_edges", "def create_package_dict(self):\n dep_node = list()\n param_list = ['name', 'version', 'dir', 'description']\n inp_list = list()\n dep_node_list = list()\n pkg_dict = dict()\n for line in self.full_ed_lines:\n inp_list.append(line.text())\n dep_pkg = inp_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n msg, msg_type = dep.split('/')\n dep_node_list.append({'name': msg, 'type': msg_type})\n for param, value in zip(param_list, inp_list):\n pkg_dict[param] = value\n pkg_dict['maintainer'] = {'name': inp_list[4], 'email': inp_list[5]}\n pkg_dict['depend'] = dep_pkg\n pkg_dict['node'] = dict()\n pkg_dict['node']['name'] = inp_list[7]\n pkg_dict['node']['depend'] = dep_node_list\n pkg_dict['node']['subscribers'] = self.manager.wid.sub_list\n pkg_dict['node']['publishers'] = self.manager.wid.pub_list\n return pkg_dict", "def build(self):\n self.logger.info('Rebuilding adjacency information')\n self.edges = collections.defaultdict(list)\n\n topic_to_publisher = collections.defaultdict(list)\n topic_to_subscribers = collections.defaultdict(list)\n node_to_missing_deps = collections.defaultdict(list)\n\n result = True\n\n for node in self.nodes.values():\n for topic in node.provided_topics.keys():\n topic_to_publisher[topic].append(node)\n\n for topic in node.required_topics:\n topic_to_subscribers[topic].append(node)\n\n for dep in node.additional_dependencies:\n if dep not in self.nodes:\n node_to_missing_deps[node].append(dep)\n\n if len(node_to_missing_deps) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] managed processes with missing dependencies'.format(len(node_to_missing_deps)), file=msg)\n fmt = ' Managed process [{}] is missing [{}]'\n\n for (node, missing) in node_to_missing_deps.items():\n print(fmt.format(node.name, ', '.join(missing)), file=msg)\n self.logger.error(msg.getvalue())\n\n missing_publishers = []\n for topic in topic_to_subscribers.keys():\n if topic not in topic_to_publisher:\n 
missing_publishers.append(topic)\n\n if len(missing_publishers) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] topics that do not have publishers'.format(len(missing_publishers)), file=msg)\n fmt = ' Topic [{}] with subscribers [{}]'\n\n for topic in missing_publishers:\n print(fmt.format(topic, ', '.join([x.name for x in topic_to_subscribers[topic]])), file=msg)\n self.logger.error(msg.getvalue())\n\n if not result:\n self.logger.error('Found errors when building adjacency information')\n raise GraphBuildError(\n 'Found errors when building adjacency information / graph edges. Check log for details')\n\n # Now we have enough information to build our edges. Phase 1: pub/sub stuff\n for (topic, subscribers) in topic_to_subscribers.items():\n publishers = topic_to_publisher[topic]\n\n for p in publishers:\n for s in subscribers:\n self.edges[p].append(s)\n\n # Phase 2: additional dependencies\n for node in self.nodes.values():\n for dep in node.additional_dependencies:\n src = self.nodes[dep]\n self.edges[src].append(node)", "def get_edgeleaf_swithnames(self):\n switchnames = {}\n with open('setup1.json', 'r') as f:\n json_text = f.read()\n setup = json.loads(json_text)\n for value in setup['topology']['nodes']:\n if value['role'] == \"EDGE_LEAF\":\n switchnames.update({value['mgmt_ip_address'] : value['name']})\n return switchnames", "def generate_inventory(baremetal_info, server_info):\n\n hosts = defaultdict(list)\n hosts_meta = {}\n\n for node in baremetal_info:\n if node['Provisioning State'].lower() == 'active':\n role = re.findall('.*profile:(compute|control)', node['Properties']['capabilities'])[0]\n for server in server_info:\n if server['ID'] == node['Instance UUID']:\n node_ip = re.findall('.+=(\\d+.\\d+.\\d+.\\d+)$', server['Networks'])[0]\n hosts[role].append(node_ip)\n # To match ssh.cfg.j2 template\n hosts_meta[node_ip] = {'ansible_ssh_host': node_ip,\n 'ansible_user': 'heat-admin'}\n\n for host in hosts:\n hosts[host].sort()\n\n return {'hosts': hosts, 'hosts_meta': hosts_meta}", "def _create_graph(self, pools: List[Pool]):\n for pool in pools:\n self._add_nodes(pool.tokens)\n\n for pool in pools: # noqa: WPS440,WPS441\n self._add_edges(pool) # noqa: WPS441", "def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in 
self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]", "def _select_implementations(self, graph, memory_manager):\n implementations = {}\n for node in graph.nodes:\n choices = []\n for op in OperationRegistry.get_ops(node.op_type):\n candidate = op.create(node, graph, memory_manager)\n if candidate is not None:\n choices.append(candidate)\n\n if len(choices) >= 1:\n implementations[node] = choices[0]\n else:\n implementations[node] = None\n\n return implementations", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def _build_referrer_map(self):\n result = {}\n\n dag_list = self._build_keyed_workflow_map().items()\n\n for (referrer, dag) in dag_list:\n for sub_dag in dag.get('sub_dags', []):\n result.setdefault(sub_dag['target'], [])\n result[sub_dag['target']].append((referrer, sub_dag['name']))\n\n for generator in dag.get('generators', []):\n result.setdefault(generator['target'], [])\n result[generator['target']].append(\n (referrer, generator['name']))\n\n return result", "def construct_trees_with_mp(self, nodes):\n\n cores = multiprocessing.cpu_count() // 2\n pool = multiprocessing.Pool(cores)\n new_nodes = []\n n_node_per_core = self.n_node // cores\n for i in range(cores):\n if i != cores - 1:\n new_nodes.append(nodes[i * n_node_per_core: (i + 1) * n_node_per_core])\n else:\n new_nodes.append(nodes[i * n_node_per_core:])\n self.trees = {}\n trees_result = pool.map(self.construct_trees, new_nodes)\n for tree in trees_result:\n self.trees.update(tree)", "def node_dictionary():\r\n\r\n classes = node_subclasses(Node)\r\n dictionary = {}\r\n\r\n for c in classes:\r\n try:\r\n name = c.identifier()\r\n dictionary[name] = c\r\n except AttributeError:\r\n # If node does not provide identifier, we consider it to be\r\n # private or abstract class\r\n pass\r\n\r\n return dictionary", "def nodes_in_components(\n components: DefaultDict[int, int]\n) -> DefaultDict[int, List]:\n content = defaultdict(list)\n for node, comp in components.items():\n content[comp].append(node)\n return content", "def make_mapping(items):\n compid, nodes = items\n nodes = list(nodes)\n base_node = min(nodes)\n return [(node,base_node) for node in nodes if node != base_node]", "def create_exporters(self):\n for node_cfg in self.node_cfg_list:\n self.create_node(node_cfg)", "def _init_nodes(self, nodes):\n attributes = self.get_node_attributes()\n for node in nodes:\n if not self._is_node_added(node):\n self._nodes.append(self._get_node_as_dictionary(node, attributes))", "def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def __create_d_map(self):\n goal_map = {}\n # collect all goal nodes\n for i, row in enumerate(self.map.get_node_grid()):\n for j, node in enumerate(row):\n if node.borders_tile_of_type(Quarantine):\n goal_map[node.get_name()] = (i, j)\n # calculate 
distance to closest goal node for each node\n for i, row in enumerate(self.map.get_node_grid()):\n for j, node in enumerate(row):\n distances = [\n abs(i - y) + abs(j - x)\n for node_name, (y, x) in goal_map.items()\n ]\n self.d_map[node.get_name()] = min(distances)", "def _CreateAdjacencyListGraph(self):\n graph = dict()\n for nodes in self.nodes:\n graph[nodes[0]] = set()\n for edges in self.edges:\n graph[edges[0]].add(edges[1])\n return graph", "def infomap_communities(G):\n name_map = {}\n name_map_inverted = {}\n for n in G.nodes():\n id_ = hash(n) % 100000\n name_map_inverted[id_] = n\n name_map[n] = id_\n \n infomapSimple = infomap.Infomap(\"--two-level\")\n network = infomapSimple.network()\n \n for n1, n2, data in G.edges(data=True):\n network.addLink(name_map[n1], name_map[n2], data['weight'] if 'weight' in data else 1)\n\n infomapSimple.run()\n\n return dict(\n (name_map_inverted[node.physicalId], node.moduleIndex())\n for node in infomapSimple.iterTree()\n if node.isLeaf()\n )", "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def parse_graph_nodes(graph_def):\n name_to_node = {}\n for node_def in graph_def.node:\n name_to_node[node_def.name] = node_def\n return name_to_node", "def get_locations(self):\n self.locations = {} # reset dictionary\n for node in self.extant_p:\n if node.host not in self.locations:\n self.locations.update({node.host: []})\n self.locations[node.host].append(node)", "def make_complete_graph(num_nodes):\r\n if num_nodes < 1:\r\n return dict()\r\n else:\r\n new_dict = dict()\r\n for node in range(num_nodes):\r\n other_nodes = range(num_nodes)\r\n other_nodes.pop(node)\r\n new_dict[node]=set(other_nodes)\r\n return new_dict", "def manager_config(self, manager):\n _, body = self.request('/v1.1/managers/configs/%s' % manager, 'GET')\n return body", "def build(cls, m, data):\n # TODO\n data = sorted(data, key=lambda x: x[0]) # Sort pairs by key.\n nodes = {} # Holds nodes and they governing value as key.\n\n while True:\n # Split into chunks of size m\n chunks = [data[i:i+m] for i in range(0, len(data), m)]\n data = []\n for chunk in chunks:\n parent = chunk.pop()\n data.append(parent)\n node = BTreeNode(m)\n node.keys = map(lambda i: i[0], chunk)\n node.values = map(lambda i: i[0], chunk)\n nodes[parent[0]] = node", "def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def gmaps_optical_nodes(request):\n # Cypher query to get all cables with 
cable type fiber that are connected\n # to two optical node.\n q = \"\"\"\n MATCH (cable:Cable)\n WHERE cable.cable_type = \"Dark Fiber\"\n MATCH (cable)-[Connected_to]->(port)\n WITH cable, port\n MATCH (port)<-[:Has*0..]-(equipment)\n WHERE (equipment:Optical_Node) AND NOT equipment.type =~ \"(?i).*tss.*\"\n WITH cable, port, equipment\n MATCH p2=(equipment)-[:Located_in]->()<-[:Has*0..]-(loc)\n WHERE (loc:Site)\n RETURN cable, equipment, loc\n \"\"\"\n result = nc.query_to_list(nc.graphdb.manager, q)\n nodes = {}\n edges = {}\n for item in result:\n node = {\n 'name': item['equipment']['name'],\n 'url': helpers.get_node_url(item['equipment']['handle_id']),\n 'lng': float(str(item['loc'].get('longitude', 0))),\n 'lat': float(str(item['loc'].get('latitude', 0)))\n }\n coords = {\n 'lng': float(str(item['loc'].get('longitude', 0))),\n 'lat': float(str(item['loc'].get('latitude', 0)))\n }\n edge = {\n 'name': item['cable']['name'],\n 'url': helpers.get_node_url(item['cable']['handle_id']),\n 'end_points': [coords]\n }\n nodes[item['equipment']['name']] = node\n if item['cable']['name'] in edges:\n edges[item['cable']['name']]['end_points'].append(coords)\n else:\n edges[item['cable']['name']] = edge\n response = HttpResponse(content_type='application/json')\n json.dump({'nodes': list(nodes.values()), 'edges': list(edges.values())}, response)\n return response", "def get_nodes(self):\n if self.nodes:\n return self.nodes\n keys = self.keys.keys()\n\n self.nodes['@'] = Node('@')\n for key in keys:\n self.nodes[key] = Node(key)\n\n maze = self.explore(self.start, 0, with_keys=keys)\n for c, pos in self.keys.items():\n self.nodes['@'].connect(self.nodes[c], maze[pos])\n\n for key, key_pos in self.keys.items():\n maze = self.explore(key_pos, 0, with_keys=keys)\n for otherkey, pos in self.keys.items():\n if key != otherkey:\n self.nodes[key].connect(self.nodes[otherkey], maze[pos])", "def aggregate_maps(self, G=[], node_tags={}):\n\n self.map = nx.DiGraph() \n \n # loop over graphs in list of graphs\n for g in G:\n # add nodes to graph\n self.map.add_nodes_from(g.nodes())\n # loop over edges \n for edge in g.edges(data=True):\n # if edge is already present, add weight to edge\n if self.map.has_edge(edge[0],edge[1]):\n self.map[edge[0]][edge[1]]['weight'] += edge[2]['weight']\n # if edge is not already present, add it with weight=1\n else:\n self.map.add_edge(edge[0],edge[1], weight=1)\n \n # relabel nodes according to mapping provided by 'node_tags'\n nx.set_node_attributes(self.map, name = 'node_label', values = node_tags) \n nx.relabel_nodes(self.map, mapping=node_tags, copy=False)\n\n # assign a random color to each node\n colour_list = np.random.choice( list(colors.get_named_colors_mapping().values()), len(self.map) )\n colour_dict = dict( zip(self.map.nodes, colour_list) )\n nx.set_node_attributes(self.map, name = 'node_color', values = colour_dict)\n\n # save node attributes to CoMap object\n self.node_labels = nx.get_node_attributes(self.map, name = 'node_label')\n self.node_colors = nx.get_node_attributes(self.map, name = 'node_color')\n \n return", "def make_complete_graph(num_nodes):\n\tif num_nodes <= 0:\n\t\treturn {}\n\tdict_graph = {}\n\tfor node in range(num_nodes):\n\t\tnode_set = set()\n\t\tfor neighbor in range(num_nodes):\n\t\t\tif node != neighbor:\n\t\t\t\tnode_set.add(neighbor)\n\t\tdict_graph[node] = node_set\n\n\treturn dict_graph", "def make_complete_graph(num_nodes):\n\tif num_nodes <= 0:\n\t\treturn {}\n\telse:\n\t\tdict_graph = {}\n\t\tfor node in 
range(num_nodes):\n\t\t\tnode_set = set()\n\t\t\tfor neighbor in range(num_nodes):\n\t\t\t\tif node != neighbor:\n\t\t\t\t\tnode_set.add(neighbor)\n\t\t\tdict_graph[node] = node_set\n\n\treturn dict_graph", "def gen_nodes(self):\n self.nodes = []\n for i in range(self.num_nodes):\n self.nodes.append(Node(self.fk))", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def parse_osm_nodes_paths(osm_data):\n\n nodes = {}\n paths = {}\n relation = {}\n\n # for element in osm_data['elements']:\n # if element['type'] == 'relation':\n\n\n for element in osm_data['elements']:\n if element['type'] == 'node':\n key = element['id']\n nodes[key] = get_node(element)\n\n elif element['type'] == 'way': #osm calls network paths 'ways'\n key = element['id']\n # pp.pprint(element)\n paths[key] = get_path(element,osm_data)\n\n return nodes, paths", "def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph", "def make_complete_graph(num_nodes):\r\n result = {}\r\n for idx in range(0,num_nodes):\r\n result[idx] = set([])\r\n for jdx in range(0,num_nodes):\r\n if (idx!=jdx):\r\n result[idx].add(jdx)\r\n return result", "def add_nodes(self, node_name_list):\n nodes = requests.post(self.__url + 'nodes', data=json.dumps(\n node_name_list), headers=HEADERS).json()\n node_dict = {}\n for node in nodes:\n node_dict[node['name']] = node['SUID']\n return node_dict", "def create_nodes(self):", "def get_nodes(wf_results):\n return {node.fullname: node for node in wf_results.nodes}", "def create_nodes(self, articles, merges):\n # self.all_nodes = [RealNode(a) for a in articles]\n if merges is None:\n for article in articles:\n matching_layer = self.find_layer_by_name(article[\"year\"])\n matching_layer.create_node(article)\n else:\n for merge in merges:\n art1 = next(x for x in articles if x['key'] == merge['art1'])\n art2 = next(x for x in articles if x['key'] == merge['art2'])\n if 'art3' in merge:\n art3 = next(x for x in articles if x['key'] == merge['art3'])\n matching_layer = self.find_layer_by_name(art1['year'])\n merge_key = art1['key'] + art2['key']\n merge_dict = {'key': merge_key, 'year': matching_layer.name, 'art1': art1, 'art2': art2, 'merge': merge}\n if 'art3' in merge:\n merge_key += art3['key']\n merge_dict = {'key': merge_key, 'year': matching_layer.name, 'art1': art1, 'art2': art2, 'art3': art3, 'merge': merge}\n new_node = matching_layer.create_node(merge_dict)\n new_node.kind = \"Merge\"\n for article in articles:\n merge_art = False\n 
for merge in merges:\n if article['key'] == merge['art1'] or article['key'] == merge['art2'] or \\\n ('art3' in merge and article['key'] == merge['art3']):\n merge_art = True\n break\n if not merge_art:\n matching_layer = self.find_layer_by_name(article[\"year\"])\n matching_layer.create_node(article)", "def get_child_map():\n entities = common.load_entity_list('../entities.csv')\n\n be_agents = [Agent(be_id, db_refs={'FPLX': be_id})\n for be_id in entities]\n ex = Expander(hierarchies)\n child_map = {}\n for be_agent in be_agents:\n children = ex.get_children(be_agent)\n children_up_ids = []\n for child_ns, child_id in children:\n if child_ns == 'HGNC':\n hgnc_id = hgnc_client.get_hgnc_id(child_id)\n up_id = hgnc_client.get_uniprot_id(hgnc_id)\n children_up_ids.append(up_id)\n else:\n print(\"Unhandled NS: %s %s\" % (child_ns, child_id))\n continue\n child_map[be_agent.name] = list(set(children_up_ids))\n return child_map", "def _gen_config():\n cfg = {\"frontends\": {}, \"backends\": {}}\n for machine in Machine.objects(\n monitoring__hasmonitoring=True,\n ):\n frontend, backend = _gen_machine_config(machine)\n cfg[\"frontends\"][machine.id] = frontend\n cfg[\"backends\"][machine.id] = backend\n return cfg", "def _configure_managers() -> Tuple[tff.simulation.FileCheckpointManager,\n List[tff.simulation.MetricsManager]]:\n root_output_dir = FLAGS.root_output_dir\n experiment_name = FLAGS.experiment_name\n utils_impl.create_directory_if_not_exists(root_output_dir)\n\n checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)\n utils_impl.create_directory_if_not_exists(checkpoint_dir)\n checkpoint_manager = tff.simulation.FileCheckpointManager(\n checkpoint_dir, step=FLAGS.rounds_per_checkpoint)\n\n results_dir = os.path.join(root_output_dir, 'results', experiment_name)\n utils_impl.create_directory_if_not_exists(results_dir)\n csv_file = os.path.join(results_dir, 'experiment.metrics.csv')\n csv_manager = tff.simulation.CSVMetricsManager(csv_file)\n\n summary_dir = os.path.join(root_output_dir, 'logdir', experiment_name)\n tensorboard_manager = tff.simulation.TensorBoardManager(summary_dir)\n\n logging.info('Writing...')\n logging.info(' checkpoints to: %s', checkpoint_dir)\n logging.info(' CSV metrics to: %s', csv_file)\n logging.info(' TensorBoard summaries to: %s', summary_dir)\n\n return checkpoint_manager, [csv_manager, tensorboard_manager]", "def generate_nodes(self):\n \n # For all state nodes\n node = 0\n \n for i in range(self.x0_n):\n for j in range(self.x1_n):\n for k in range(self.x2_n):\n \n # State\n x = np.array([ self.xd[0][i] , self.xd[1][j] , self.xd[2][k] ])\n \n # State and grid index based on node #\n self.nodes_state[node,:] = x\n self.nodes_index[node,:] = np.array([i,j,k])\n \n # Node # based on index ijk\n self.x_grid2node[i,j,k] = node\n \n # Increment node number\n node = node + 1", "def create_tree_structure(prosecutor_offices):\n tree_dict = {}\n for prosecutor_office in prosecutor_offices:\n adder(tree_dict, prosecutor_office, {'departments': {}, 'divisions': []})\n for department in prosecutor_office.department_set.all():\n adder(tree_dict[prosecutor_office]['departments'], department, department.division_set.all())\n for division in prosecutor_office.division_set.all():\n if not division.department:\n tree_dict[prosecutor_office]['divisions'].append(division)\n return tree_dict", "def dictize(self):\n dict = {}\n for node in self.sort():\n logger.debug(\"Dictize: id %s has name %s\" % (node._id, node.name))\n x = node._kwargs()\n 
dict[node._id]={\"klass\":node.__class__.__name__, \n \"kwargs\": x,\n \"children\":[child._id for child in node.children()]}\n return dict", "def _build_nodes(self):\n res = super(MacFix, self)._build_nodes()\n\n observed_addresses = set(inner for outer in [host.mac_addresses for host in self._hosts.values()] for inner in outer)\n unsatisfied = self._macfix__mac_addresses.difference(observed_addresses)\n unexpected = observed_addresses.difference(self._macfix__mac_addresses)\n if len(unsatisfied) > 1:\n raise AssertionError(\"unsatisfied: %s -- 2 or more host MAC addresses could not be found. Are you using the correct topology file?\" % unsatisfied)\n assert len(unexpected) == len(unsatisfied)\n if len(unsatisfied) == 1:\n self._macfix__result = {list(unsatisfied)[0]: list(unexpected)[0]}\n else:\n self._macfix__result = {}\n\n return res", "def getMasterMap(self,masterInfo):\n masterMap = [0]\n #--Map'em\n for mmName in masterInfo.masterNames:\n if mmName not in self.masterNames: \n raise MoshError(_(\"Misordered esm: %s should load before %s\") % (mmName, masterInfo.name))\n masterMap.append(self.masterNames.index(mmName)+1)\n #--Done\n return masterMap", "def _build_scene(self, nodes):\n new_scene = {}\n\n new_nodes_ind = self._resolve_mapping(inp=nodes, mapping=self.nodes_map)\n if new_nodes_ind:\n new_scene[\"nodes\"] = new_nodes_ind\n\n return new_scene", "def organize_hierarchy(hierarchy_dict):\n top_down_hierarchy = defaultdict(dict)\n for employee, supervisor in hierarchy_dict.items():\n manager = top_down_hierarchy[supervisor]\n manager[employee] = top_down_hierarchy[employee]\n\n # remove the previously created employee key since it has now been assigned to a manager\n top_down_hierarchy.pop(employee)\n\n return dict(top_down_hierarchy)", "def compute_helper_mempool_dictionaries():\n txn_density_dict = {}\n txn_parents_dict = {}\n txn_size_dict = {}\n mempool_data = parse_mempool_csv()\n for elem in mempool_data:\n size = elem.weight/MAXIMUM_BLOCK_WEIGHT # weight mapped to (0,1)\n txn_size_dict[elem.txid] = size \n txn_density_dict[elem.txid] = elem.fee/size\n if elem.parents != '':\n txn_parents_dict[elem.txid] = elem.parents.strip().split(';')\n return txn_density_dict,txn_parents_dict,txn_size_dict", "def _add_nodes_to_mapping(self, nodes: Iterable[graph.Node]) -> None:\n nodes = filter(lambda node: node.element_id not in self._id_to_obj, nodes)\n if not nodes:\n logger.debug(\n \"No nodes to parse packs because all of them in mapping\",\n self._id_to_obj,\n )\n return\n with Pool(processes=cpu_count()) as pool:\n results = pool.starmap(\n _parse_node, ((node.element_id, dict(node.items())) for node in nodes)\n )\n for result in results:\n assert result.database_id is not None\n self._id_to_obj[result.database_id] = result", "def create_ner_tags_dict():\r\n global ne_tags_set, ner_to_id, ne_tags, id_to_ner\r\n\r\n ne_tags = list(ne_tags_set) + ['[CLS]', '[SEP]']\r\n ne_tags.sort()\r\n id_to_ner = {idx: tag for idx, tag in enumerate(ne_tags)}\r\n ner_to_id = {tag: idx for idx, tag in enumerate(ne_tags)}\r\n print(f'Total NER tag size: {len(ne_tags)}; Tags: {ne_tags}')" ]
[ "0.6298359", "0.62928385", "0.59723586", "0.59544337", "0.586242", "0.58006024", "0.57427794", "0.5736032", "0.56940466", "0.56494904", "0.5589373", "0.5572555", "0.5556493", "0.5530863", "0.55166465", "0.5495922", "0.5472622", "0.5469694", "0.5432187", "0.54266393", "0.54139537", "0.54112816", "0.5410967", "0.5399453", "0.5391167", "0.53517604", "0.5345311", "0.53422654", "0.5328544", "0.5322281", "0.5275944", "0.52624947", "0.5252813", "0.52495486", "0.524606", "0.5236058", "0.52262676", "0.5202483", "0.51903033", "0.51884115", "0.5181626", "0.5181202", "0.5163119", "0.5151403", "0.51430327", "0.5083845", "0.50704503", "0.50579816", "0.5057693", "0.50572795", "0.50552857", "0.505197", "0.5045404", "0.5033903", "0.5031232", "0.50126386", "0.50074464", "0.49969077", "0.49935704", "0.49905145", "0.4985517", "0.498407", "0.49828684", "0.49826118", "0.4980077", "0.49799594", "0.4976684", "0.4970874", "0.4970663", "0.4956384", "0.4944589", "0.49250385", "0.49210703", "0.49192852", "0.49162856", "0.4913188", "0.48984152", "0.48959082", "0.48949656", "0.48930764", "0.4887773", "0.48815635", "0.48774907", "0.48760462", "0.4874016", "0.48704568", "0.4865065", "0.48589873", "0.48565823", "0.48527625", "0.485081", "0.485061", "0.4850445", "0.48308447", "0.482686", "0.48218215", "0.48185045", "0.48145893", "0.48142055", "0.48134342" ]
0.5909479
4
This function recursively builds a string of manager-to-employee relationships, starting from the managers that do not have managers.
def findHierarchy(self):

    def __recursiveHelper(key_name, output, indent):
        # Append each direct report at the current indent, then recurse
        # into that employee's own reports, threading the accumulated
        # string through the recursion so it can be returned.
        if key_name in self.relations:
            for employee in self.relations[key_name].employees:
                output += " " * indent + str(employee) + "\n"
                output = __recursiveHelper(employee, output, indent + 1)
        return output

    output = ""
    indent = -1
    # self.relations is a dictionary of manager-name string keys.
    # The employees of None are the top-ranking managers, so the walk
    # starts from the managers that do not have managers.
    return __recursiveHelper(None, output, indent + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildHierarchy(self, test_input):\n for entry in test_input:\n if entry['manager']not in self.relations:\n self.relations[entry['manager']] = Node(entry['manager'], entry['name'])\n else:\n self.relations[entry['manager']].employees.append(entry['name'])", "def generate_full_chain(chain):\n list_of_subchains = [extract_amino_acids(subchain) for subchain in chain]\n # Join list into single string separated by spaces\n return ' '.join(list_of_subchains)", "def str_recursive(node):\n\n if node == None:\n return \"\"\n else:\n return str(node.item) + \" \" + LinkedList.str_recursive(node.next)", "def phone_dir_nav_eager():\n\n emps = Employee.query.options(db.joinedload('dept')).all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")", "def phone_dir_nav():\n\n emps = Employee.query.all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")", "def get_movie_people_relation(title, people_dict, movie_people_dict):\n for item in title:\n for key in people_dict.keys():\n for movie_title in people_dict[key]:\n if item == movie_title:\n if item in movie_people_dict.keys():\n if key not in movie_people_dict[item]:\n movie_people_dict[item] += ',' + key\n else:\n movie_people_dict[item] = key\n else:\n if item not in movie_people_dict.keys():\n movie_people_dict[item] = ''\n return movie_people_dict", "def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted", "def _intermediary_to_dot(tables, relationships):\n t = '\\n'.join(t.to_dot() for t in tables)\n r = '\\n'.join(r.to_dot() for r in relationships)\n return '{}\\n{}\\n{}\\n}}'.format(GRAPH_BEGINNING, t, r)", "def _generate_hierarchy_string(self, skeleton):\n hierarchy_string = \"HIERARCHY\\n\"\n hierarchy_string += self._generate_joint_string(skeleton.root, skeleton, 0)\n return hierarchy_string", "def asngen(pool):\n pool = AssociationPool.read(pool)\n rules = AssociationRegistry()\n (asns, orphaned) = generate(pool, rules)\n result = []\n result.append('There where {:d} associations found.'.format(len(asns)))\n result.append('There where {:d} orphaned exposures.'.format(len(orphaned)))\n for assocs in asns:\n result.append(assocs.__str__())\n\n return '\\n'.join(result)", "def _return_string_all_descendants_rec(self, node, string, level):\n if len(node.get_children()) == 0:\n return string\n else:\n level += 1\n for child in node.get_children():\n string += \"| \"*level\n string += \"|---\" + str(child) + \"\\n\"\n string = self._return_string_all_descendants_rec(child, string, level)\n return string", "def get_tree_str(self, depth: int = 0) -> str:\n temp = \" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n 
return temp", "def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)", "def phone_dir_join_outerjoin():\n\n emps = (db.session.query(Employee, Department)\n .outerjoin(Department).all())\n\n for emp, dept in emps: # [(<E>, <D>), (<E>, <D>)]\n if dept:\n print(emp.name, dept.dept_name, dept.phone)\n else:\n print(emp.name, \"-\", \"-\")", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def make_to_string(front, mid, back, empty_repr):\n \"*** YOUR CODE HERE ***\"\n def printer(lnk):\n if lnk == Link.empty:\n return empty_repr\n else:\n return front + str(lnk.first) + mid + printer(lnk.rest) + back\n return printer", "def str_reverse_recur(node):\n\n if node == None:\n return \"\"\n else:\n return LinkedList.str_reverse_recur(node.next) + \" \" + str(node.item)", "def _generate_sql_parts(self, node,i=0,colNames=None,sql=None):\n\t\treferencesPersonFact = False\n\t\tif i == 0:\n\t\t\tsql=[]\n\t\t\tcolNames=[]\n\t\t\t# print('\\nSELECT *\\nFROM {}'.format(node))\n\t\tfor edge in self.DiG.out_edges(node):\n\t\t\t# print('\\tedge: {}->{} {}'.format(*edge,self.DiG.get_edge_data(*edge)))\n\t\t\tcolNames.append('{}.{}'.format(edge[1],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\t# print('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tsql.append('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tself._generate_sql_parts(edge[1],i+1,colNames,sql)\n\t\t\t# if 'dbo.PersonFact' in edge[0] or 'dbo.PersonFact' in edge[1]:\n\t\t\t\t# referencesPersonFact = True\n\t\t# print('_generate_sql_parts')\n\t\t# print(colNames)\n\t\t# if referencesPersonFact and 'CommunityMart.dbo.PersonFact.PatientID' not in colNames:\n\t\t\t# colNames.append('CommunityMart.dbo.PersonFact.PatientID')\n\t\tnet_new_colNames = []\n\t\t# remove colNames of already in leaf table\n\t\tfor colName in colNames:\n\t\t\tif node not in colName:\n\t\t\t\tnet_new_colNames.append(colName)\n\t\treturn net_new_colNames,sql", "def format_relation(relation: list):\n pattern = \"%1s%6s%8i%1i%1s%10.2f%10.2f%1i%5i%5i%1i%10.2f%10.2f%10.2f%1i\"\n return pattern % (\n relation[0],\n relation[1] if relation[1] is not None else 0,\n relation[2],\n relation[3],\n relation[4],\n relation[5],\n relation[6],\n relation[7],\n relation[8],\n relation[9],\n relation[10],\n relation[11],\n relation[12],\n relation[13],\n relation[14]\n )", "def get_personnel():\r\n if len(man) == 0:\r\n print(\"There are no managers\")\r\n else:\r\n for i in man:\r\n print(str(i))", "def _generate_subgraph_sql_parts(self, node, subgraph, i=0,colNames=None,sql=None):\n\t\treferencesPersonFact = False\n\t\tif i == 0:\n\t\t\tsql=[]\n\t\t\tcolNames=[]\n\t\t\t# print('\\nSELECT *\\nFROM {}'.format(node))\n\t\tfor edge in subgraph.out_edges(node):\n\t\t\t# print('\\tedge: {}->{} {}'.format(*edge,subgraph.get_edge_data(*edge)))\n\t\t\tcolNames.append('{}.{}'.format(edge[0],subgraph.get_edge_data(*edge)['Column']))\n\t\t\t# 
print('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],subgraph.get_edge_data(*edge)['Column'],edge[0],subgraph.get_edge_data(*edge)['Column']))\n\t\t\tsql.append('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],subgraph.get_edge_data(*edge)['Column'],edge[0],subgraph.get_edge_data(*edge)['Column']))\n\t\t\tself._generate_subgraph_sql_parts(edge[1],subgraph,i+1,colNames,sql)\n\t\t\tif 'dbo.PersonFact' in edge[0] or 'dbo.PersonFact' in edge[1]:\n\t\t\t\treferencesPersonFact = True\n\t\t# print('_generate_subgraph_sql_parts')\n\t\t# if referencesPersonFact and 'CommunityMart.dbo.PersonFact.PatientID' not in colNames:\n\t\t\t# colNames.append('CommunityMart.dbo.PersonFact.PatientID')\n\t\t# remove colNames of already in leaf table\n\t\tnet_new_colNames = []\n\t\tfor colName in colNames:\n\t\t\tif node not in colName:\n\t\t\t\tnet_new_colNames.append(colName)\n\t\treturn net_new_colNames,sql", "def get_programs(e: str, ans: str, all_paths_around_e: List[List[str]]):\n all_programs = []\n for path in all_paths_around_e:\n for l, (r, e_dash) in enumerate(path):\n if e_dash == ans:\n # get the path till this point\n all_programs.append([x for (x, _) in path[:l + 1]]) # we only need to keep the relations\n return all_programs", "def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }", "def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': 
node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node", "def _parse_relators(rels):\n return rels", "def phone_dir_join():\n\n emps = (db.session.query(Employee.name,\n Department.dept_name,\n Department.phone)\n .join(Department).all())\n\n for name, dept, phone in emps: # [(n, d, p), (n, d, p)]\n print(name, dept, phone)", "def __str__(self):\n left = ''\n right = ''\n for i in range(len(self.ant)):\n left += Prop.__str__(self.ant[i]) + \", \"\n \n for i in range(len(self.con)):\n right += Prop.__str__(self.con[i]) + \", \"\n return left[:-2] + '|-- ' + right[:-2]", "def __str__(self):\n _str = \"\"\n current_node = self._head\n while(current_node != None):\n _str += str(current_node.value)\n _str += \" -> \"\n current_node = current_node.next\n _str += \"None\"\n return _str", "def macs_to_str(self, reached_max_depth: bool) -> str:\n if self.num_params > 0 and (\n reached_max_depth or not any(self.module.children())\n ):\n return f\"{self.macs:,}\"\n return \"--\"", "def get_relations(char):\n\n def parse_name(relation):\n \"\"\"Helper function for outputting string display of character name\"\"\"\n if relation.player:\n char_ob = relation.player.char_ob\n return \"%s %s\" % (char_ob.key, char_ob.item_data.family)\n else:\n return str(relation)\n\n try:\n dom = char.player_ob.Dominion\n parents = []\n uncles_aunts = []\n for parent in dom.all_parents:\n parents.append(parent)\n for sibling in parent.siblings:\n uncles_aunts.append(sibling)\n for spouse in sibling.spouses.all():\n uncles_aunts.append(spouse)\n\n unc_or_aunts = set(uncles_aunts)\n relations = {\n \"parents\": [parse_name(ob) for ob in parents],\n \"siblings\": list(parse_name(ob) for ob in dom.siblings),\n \"uncles_aunts\": list(parse_name(ob) for ob in unc_or_aunts),\n \"cousins\": list(parse_name(ob) for ob in dom.cousins),\n }\n return relations\n except AttributeError:\n return {}", "def __str__(self):\n width = int(np.prod(self.no_parent_states)) #multiplies the elements in list, here tot num of states\n grid = np.meshgrid(*[range(i) for i in self.no_parent_states]) \n s = \"\"\n for (i, e) in enumerate(self.parents):\n s += '+----------+' + '----------+' * width + '\\n'\n gi = grid[i].reshape(-1)\n s += f'|{e:^10}|' + '|'.join([f'{e + \"(\"+str(j)+\")\":^10}' for j in gi])\n s += '|\\n'\n\n for i in range(self.no_states):\n s += '+----------+' + '----------+' * width + '\\n'\n state_name = self.name + f'({i})'\n s += f'|{state_name:^10}|' + '|'.join([f'{p:^10.4f}' for p in self.table[i]])\n s += '|\\n'\n\n s += '+----------+' + '----------+' * width + '\\n'\n\n return s", "def render(self) -> str:\n lines = []\n seen_node = set()\n\n def gen_line(indent, n_id):\n if (indent, n_id) in seen_node:\n return\n seen_node.add((indent, n_id))\n\n conn_symbol = [\"|--\", \"`--\"]\n last = len(self._graph[n_id]) - 1\n for i, next_n_id in enumerate(self._graph[n_id]):\n node = self._id_to_term_node[next_n_id]\n lines.append(\n f\"{indent}{conn_symbol[1 if i==last else 0]}{node.type} {node.other_info}\"\n )\n next_indent = indent\n # increase indent for 
the next level.\n next_indent += \" \" if (i == last) else \"| \"\n gen_line(next_indent, next_n_id)\n\n first_node_id = self._node_id_rpo[0]\n first_node = self._id_to_term_node[first_node_id]\n lines.append(f\"@{self._name}({first_node.other_info})\")\n gen_line(\"\", first_node_id)\n\n return \"\\n\".join(lines)", "def generate_edgelist(H, delimiter=\" \"):\n for id in H.edges:\n e = H.edges.members(id)\n yield delimiter.join(map(str, e))", "def _intermediary_to_markdown(tables, relationships):\n t = '\\n'.join(t.to_markdown() for t in tables)\n r = '\\n'.join(r.to_markdown() for r in relationships)\n return '{}\\n{}'.format(t, r)", "def __str__(self) -> str:\n return '\\n'.join([' '.join([str(u) for u in row]) for row in self.adjacency_matrix])", "def get_M_as_string(self):\n return '\\n'.join(['M({})={}'.format(p.name, p.M) for p in self.P])", "def construct_paths(data, relation_level_words, qald=False,goldorpred='gold'):\n abstract_question = data[goldorpred]['abstract_question'].replace('<e>', 'entity1').replace('<l>', 'literal1')\n question = ei.vocabularize(nlutils.tokenize(abstract_question))\n\n \"\"\"======\"\"\"\n question_dep = []\n if 'abstract_question_deppath' in data['gold']:\n for abstract_question_deppath_simple in data['gold']['abstract_question_deppath']:\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E0>', 'entity1')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E1>', 'entity2')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E2>', 'entity3')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L1>', 'literal1')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L2>', 'literal2')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L3>', 'literal3')\n question_dep.append([int(id_) for id_ in list(\n ei.vocabularize(nlutils.tokenize(abstract_question_deppath_simple.strip())))])\n if len(question_dep) == 0:\n question_dep.append([int(id_) for id_ in list(ei.vocabularize(nlutils.tokenize(abstract_question.strip())))])\n question_dep_mask_matrix = 1.0*np.ones((1, len(question_dep)))\n\n \"\"\"======\"\"\"\n\n '''goldpathindex 可能要用于mrr计算,有了goldpathindex其实就不需要no_positive_path'''\n candidates=[]\n for key in ['hop4','hop3_2','hop3_1','hop3_0','hop3','hop2','hop1']:\n if key in data[goldorpred]:\n candidates+=data[goldorpred][key]\n\n ####get gold path####\n goldpathindex = -1\n for index,candidate in enumerate(candidates):\n if np.array_equal(candidate, data['gold']['path']):\n goldpathindex=index\n break\n\n ##########get candidate path#####\n candidate_paths = []\n candidate_paths_words = []\n for cand_path in candidates:\n candidate_path=[]\n candidate_path_words=[]\n add=True\n for p in cand_path:\n # p = p.lower() lcquad\n if p in embeddings_interface.SPECIAL_CHARACTERS:\n candidate_path.extend( vocabularize_relation(p))\n else:\n if p not in relation_level_words:\n # add=False\n # break\n candidate_path.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n candidate_path_words.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n else:\n if \"0\" not in relation_level_words[p]:\n # add=False\n # break\n # print('pppp', p, p.replace(\"http://dbpedia.org/property/\", \"\"), relation_level_words[p])\n candidate_path.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n # print('before',candidate_path_words)\n 
candidate_path_words.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n # print('end',ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]),candidate_path_words)\n else:\n candidate_path.extend( ei.vocabularize(relation_level_words[p]['0']).tolist())\n candidate_path_words.extend( ei.vocabularize(relation_level_words[p]['0']).tolist())\n if add:\n candidate_paths.append(np.asarray(candidate_path))\n candidate_paths_words.append(np.asarray(candidate_path_words))\n\n return question,\\\n np.asarray(question_dep), np.asarray(question_dep_mask_matrix),\\\n np.asarray(candidate_paths), np.asarray(candidate_paths_words),\\\n goldpathindex, candidates", "def _build_fullname(tree: dict) -> None:\n def _apply(item: dict) -> None:\n components = item.pop(\"components\")\n try:\n idx = components[::-1].index(None)\n except ValueError:\n pass\n else:\n components = components[len(components) - idx:]\n if components:\n item[\"fullname\"] = \".\".join(components)\n else:\n item[\"fullname\"] = None\n apply_tree(tree, _apply)", "def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left", "def __str__(self):\n current = self.root\n nodes = [self.root]\n final = str(self.root) + \"\\n\"\n count = 0\n while len(nodes) != 0:\n count += 1\n if count == 10:\n return \"\"\n temp = []\n for node in nodes:\n if node.left != None:\n temp.append(node.left)\n final += str(node.left) + \" \"\n else:\n final += \"_ \"\n if node.right != None:\n temp.append(node.right)\n final += str(node.right) + \" \"\n else:\n final += \"_ \"\n if temp == []:\n if node == nodes[len(nodes) - 1]:\n break\n final += \"\\n\"\n nodes = temp\n self.in_order_traversal()\n for item in self.traverse:\n final += str(item.key) + \" \"\n final += \"\\n\"\n return final", "def serialize(self, root):\n\n if root is None:\n return \"\"\n curr_lvl=[root]\n next_lvl = []\n ans =[]\n\n while(curr_lvl):\n tmp_ans = \",\".join(str(node.val) if node is not None else \"*\" for node in curr_lvl)\n ans.append(tmp_ans)\n nxt_lvl = []\n for each in curr_lvl:\n if each is not None:\n nxt_lvl.append(each.left) \n nxt_lvl.append(each.right)\n\n curr_lvl=nxt_lvl\n\n return \";\".join(ans)", "def generate_recursive_rules(limit):\n\n return [\n # 8: 42 | 42 8 - recursion creates the pattern: 42 | 42 42 | 42 42 42 etc...\n f\"8: {' | '.join([('42 ' * x).strip() for x in range(1, limit + 1)])}\",\n # 11: 42 31 | 42 11 31 - recursion creates the pattern: 42 31 | 42 42 31 31 | 42 42 42 31 31 31 etc...\n f\"11: {' | '.join([('42 ' * x).strip() + ' ' + ('31 ' * x).strip() for x in range(1, limit + 1)])}\"\n ]", "def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]", "def relationships(self):", "def _populate_relationships(self, rec_curr):\n for relationship_type, goids in rec_curr.relationship.items():\n parent_recs = set([self[goid] for goid in goids]) \n rec_curr.relationship[relationship_type] = parent_recs # replace GO ID with GO Term record 
object\n for parent_rec in parent_recs:\n if relationship_type not in parent_rec.relationship_rev:\n parent_rec.relationship_rev[relationship_type] = set([rec_curr])\n else:\n parent_rec.relationship_rev[relationship_type].add(rec_curr)", "def get_mtree_newick(mtree):\n newick_str = ''\n \n for nm, mut in sorted(mtree, key=lambda x: x[0]):\n if isinstance(mut, MuTree):\n newick_str += '(' + gsub(',$', '', get_mtree_newick(mut)) + ')'\n \n if nm == \".\":\n newick_str += '{*none*},'\n else:\n newick_str += '{' + nm + '},'\n\n if mtree.depth == 0:\n newick_str = gsub(',$', '', newick_str) + ';'\n\n return newick_str", "def join_recursive(lst, sep):\n msg = ''\n for i in lst:\n if isinstance(i, tuple) or isinstance(i, list):\n msg += join_recursive(i, sep)\n else:\n msg += (i + sep)\n return msg", "def compute_relations(nodes: List[Node]) -> None:\n # Calculate parents\n for node in nodes:\n node.parents = []\n for node in nodes:\n for child in node.children():\n child.parents.append(node)\n\n def compute_dominators(\n entry: Node,\n parents: Callable[[Node], List[Node]],\n dominators: Callable[[Node], Set[Node]],\n immediately_dominates: Callable[[Node], List[Node]],\n set_immediate_dominator: Callable[[Node, Optional[Node]], None],\n ) -> None:\n # See https://en.wikipedia.org/wiki/Dominator_(graph_theory)#Algorithms\n # Note: if `n` is unreachable from `entry`, then *every* node will\n # vacuously belong to `n`'s dominator set.\n for n in nodes:\n dominators(n).clear()\n if n == entry:\n dominators(n).add(n)\n else:\n dominators(n).update(nodes)\n\n changes = True\n while changes:\n changes = False\n for node in nodes:\n if node == entry:\n continue\n nset = dominators(node)\n for parent in parents(node):\n nset = nset.intersection(dominators(parent))\n nset.add(node)\n if len(nset) < len(dominators(node)):\n assert nset.issubset(dominators(node))\n dominators(node).intersection_update(nset)\n changes = True\n\n # Compute immediate dominator, and the inverse relation\n for node in nodes:\n immediately_dominates(node).clear()\n for node in nodes:\n doms = dominators(node).difference({node})\n # If `node == entry` or the flow graph is not reducible, `doms` may be empty.\n # TODO: Infinite loops could be made reducible by introducing\n # branches like `if (false) { return; }` without breaking semantics\n if doms:\n # There should be a unique max `len(dominators(d))` if the flowgraph\n # is reducible. 
Fall back to largest index for irreducible graphs.\n imdom = max(doms, key=lambda d: (len(dominators(d)), d.block.index))\n immediately_dominates(imdom).append(node)\n set_immediate_dominator(node, imdom)\n else:\n set_immediate_dominator(node, None)\n for node in nodes:\n immediately_dominates(node).sort(key=lambda x: x.block.index)\n\n def _set_immediate_dominator(node: Node, imdom: Optional[Node]) -> None:\n node.immediate_dominator = imdom\n\n def _set_immediate_postdominator(node: Node, impdom: Optional[Node]) -> None:\n node.immediate_postdominator = impdom\n\n entry = nodes[0]\n terminal = nodes[-1]\n assert isinstance(terminal, TerminalNode)\n\n # Compute dominators & immediate dominators\n compute_dominators(\n entry=entry,\n parents=lambda n: n.parents,\n dominators=lambda n: n.dominators,\n immediately_dominates=lambda n: n.immediately_dominates,\n set_immediate_dominator=_set_immediate_dominator,\n )\n\n # Compute postdominators & immediate postdominators\n # This uses the same algorithm as above, but with edges reversed\n compute_dominators(\n entry=terminal,\n parents=lambda n: n.children(),\n dominators=lambda n: n.postdominators,\n immediately_dominates=lambda n: n.immediately_postdominates,\n set_immediate_dominator=_set_immediate_postdominator,\n )\n\n # Iterate over all edges n -> c and check for backedges, which define natural loops\n for node in nodes:\n for child in node.children():\n if child not in node.dominators:\n continue\n # Found a backedge node -> child where child dominates node; child is the \"head\" of the loop\n if child.loop is None:\n child.loop = NaturalLoop(child)\n child.loop.nodes |= {child, node}\n child.loop.backedges.add(node)\n for parent in nodes:\n if reachable_without(parent, node, child):\n child.loop.nodes.add(parent)", "def generate_graph(nodes):\n \n nodes = nodes.dropna()\n l = []\n edge_format = \"{0} {1}\"\n \n for x1, x2 in zip(nodes.shift(), nodes):\n if not(pd.isnull(x1) or pd.isnull(x2)):\n if x1 != x2:\n l.append(edge_format.format(x1, x2))\n \n return l", "def make_dep_graph(depender):\n\tshutit_global.shutit_global_object.yield_to_draw()\n\tdigraph = ''\n\tfor dependee_id in depender.depends_on:\n\t\tdigraph = (digraph + '\"' + depender.module_id + '\"->\"' + dependee_id + '\";\\n')\n\treturn digraph", "def get_relatives_models(mat):\n if mat.model is not None:\n model = mat.model + '-' + str(mat.layer.dimensionality) + 'D'\n model = model.replace('-', '_')\n else:\n model = None\n\n parent1_model = None\n parent2_model = None\n if len(mat.parents) > 0:\n assert len(mat.parents[0].factors) == 2\n parent1_model = (mat.parents[0].model + '-2D').replace('-', '_')\n if len(mat.parents) > 1:\n assert len(mat.parents[1].factors) == 2\n parent2_model = (mat.parents[1].model + '-2D').replace('-', '_')\n elif len(mat.parents) > 2:\n raise NotImplementedError(\"More than two parents are not supported.\")\n\n return model, parent1_model, parent2_model", "def concantenation_of_names(self):\n for rec in self:\n if (rec.last_name and rec.first_name and rec.middle_name) and rec.name == False:\n rec.name= str(self.first_name) + \" \" + str(self.middle_name) + \" \" + str(self.last_name)", "def extract_emo_relations(self):\n for tweet_idx, tweet in enumerate(self.tweets):\n tweet_tokens = []\n idx2word, child2parent = {}, {}\n for word in tweet.rstrip().split('\\n'):\n if not word:\n sys.stderr.write(\"wat\")\n continue\n curr_word = Word(word.rstrip().split('\\t'), tweet_idx)\n idx2word[curr_word.idx] = curr_word\n child2parent[curr_word] = 
curr_word.parent\n\n # Isolate emotion words that are Verbs or Adjectives\n if curr_word.text in self.emo_kws and curr_word.pos in self.POS_LIST:\n self.tweet2emo[tweet_idx].append(curr_word)\n curr_word.is_emotion_word = True\n\n tweet_tokens.append(curr_word.text)\n\n # update tweet dictionary and add children to words\n self.add_relatives(child2parent, idx2word)\n tweet_text = \" \".join(tweet_tokens)\n self.idx2tweet[tweet_idx] = tweet_text\n\n # Create Tweet object\n self.add_tweet(tweet_idx, tweet_text, tweet_tokens, list(idx2word.values()))", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def _compute_repr(tree):\n if tree.height() == 2:\n return \"[.{} {} ] \".format(tree.label(), \"\".join(tree.leaves()))\n else:\n s = \"\"\n for child in tree:\n s += _compute_repr(child)\n return \"[.{} {} ] \".format(tree.label(), s)", "def marshall_relations(annosets, frame_relations, fe_relations,\n target_dirpath):\n annosets, _annosets, __annosets = itertools.tee(annosets, 3)\n frame_id_set = _get_frame_id_set(annosets)\n frame_relations_dict = _get_frame_relations_dict(\n frame_relations, frame_id_set, const.HIERARCHY_RELATION_TYPES)\n frames = [annoset.target.lexunit.frame for annoset in _annosets]\n ancestors_dict = _get_ancestors_dict(frames, frame_relations_dict)\n ancestors_output_file = files_utils.get_ancestors_filepath(target_dirpath)\n with open(ancestors_output_file, 'w', encoding='utf-8') \\\n as ancestors_stream:\n for frame_name, ancestors in ancestors_dict.items():\n if not ancestors:\n print(frame_name, file=ancestors_stream)\n else:\n print('{},{}'.format(frame_name, ','.join(ancestors)),\n file=ancestors_stream)\n frame_parents_dict = _get_frame_parents_dict(frames, frame_relations_dict)\n frame_parents_output_file = files_utils.get_frame_parents_filepath(\n target_dirpath)\n with open(frame_parents_output_file, 'w', encoding='utf-8') \\\n as frame_parents_stream:\n for frame_name, parents in frame_parents_dict.items():\n print('{},{}'.format(frame_name, ','.join(parents)),\n file=frame_parents_stream)\n fe_id_set = _get_fe_id_set(__annosets)\n fe_relations_dict = _get_fe_relations_dict(\n fe_relations, fe_id_set, const.HIERARCHY_RELATION_TYPES)\n rolemappings_output_file = files_utils.get_rolemappings_filepath(\n target_dirpath)\n with open(rolemappings_output_file, 'w', encoding='utf-8') \\\n as rolemappings_stream:\n for role_name, roles in sorted(fe_relations_dict.items()):\n print('{},{}'.format(role_name, ','.join(roles)),\n file=rolemappings_stream)", "def __str__(self):\n\n maze_rows = ['-' * self.nx * 2]\n for y in range(self.ny):\n maze_row = ['|']\n for x in range(self.nx):\n prefix = \" \"\n if self.cell_at(x, y).is_current_position:\n prefix = \"*\"\n if self.cell_at(x, y).walls['E']:\n maze_row.append(prefix + '|')\n else:\n maze_row.append(prefix + ' ')\n maze_rows.append(''.join(maze_row))\n maze_row = ['|']\n for x in range(self.nx):\n if self.cell_at(x, y).walls['S']:\n maze_row.append('-+')\n else:\n 
maze_row.append(' +')\n maze_rows.append(''.join(maze_row))\n return '\\n'.join(maze_rows)", "def logic_program_form(self):\r\n s = ''\r\n for x in self.new_sorts:\r\n s = s + 'dom(' + x + ').\\nis_a(' + x + ', nodes).\\n' \r\n for y in self.supersorts:\r\n s = s + 'link(' + x + ', ' + y + ').\\n'\r\n return s", "def get_tree_string(self, node):\n string = \"\"\n for child in sorted(node.children):\n string += node.depth * \"\\t\"\n if node.depth > 0:\n string += \"|\"\n string += node.feature + \"=\" + child\n if node.children[child].is_leaf:\n string += \":\" + node.children[child].pred + \"\\n\"\n else:\n string += \"\\n\" + self.get_tree_string(node.children[child])\n\n return string", "def _complete_name(self):\n for record in self:\n if record.parent_id:\n record.complete_name = record.parent_id.complete_name + ' / ' + record.name\n else:\n record.complete_name = record.name", "def cycles(self) -> str:\n if self.inheritanceCycles:\n return (\n ', '.join(\n ('<'.join(c.name for c in cycle))\n for cycle in self.inheritanceCycles))\n else:\n return None", "def fixRelations (self):\n\t\tnodes = self.getFieldElements (\"relation\")\n\t\tif not nodes: return\n\t\t\n\t\tprint \"\\n%s\" % self.getId()\n\t\tfor r in nodes:\n\t\t\tvalue = XmlUtils.getText(r)\n\t\t\tif not value: return\n\t\t\tXmlUtils.setText (r,\"\")\n\t\t\tif value.startswith (\"http://\"):\n\t\t\t\tr.setAttribute (\"type\", \"Has part\")\n\t\t\t\tr.setAttribute (\"url\", value)\n\t\t\telse:\n\t\t\t\tr.setAttribute (\"type\", \"Is related\")\n\t\t\t\tr.setAttribute (\"title\", value)\n\t\t\tprint r.toxml()\n\t\tif 0:\n\t\t\tself.write()\n\t\t\tprint \"wrote record\"", "def find_joins(table, root, graph, path=None):\n if table == root:\n return path\n if path is None:\n path = []\n candidates = []\n for parent, columns in graph[table][\"fks\"].items():\n if parent == table:\n continue\n for from_col, to_col, nullable, constraint_name in columns:\n if not nullable:\n found = find_joins(parent, root, graph, path + [(parent, from_col, to_col)])\n if found:\n candidates.append(found)\n candidates.sort(key=len)\n return candidates[0] if candidates else None", "def solve(self, g: List[List[str]]) -> None:\n n = len(g)\n m = len(g[0])\n for i in range(n):\n for j in range(m):\n if g[i][j] == 'O':\n g[i][j] = ' '\n def dfs(x, y):\n g[x][y]='O'\n for nx, ny in (x+1,y),(x-1,y),(x,y+1),(x,y-1):\n if 0<=nx<n and 0<=ny<m and g[nx][ny]==' ':\n dfs(nx, ny)\n for i in range(n):\n if g[i][0]==' ':\n dfs(i,0)\n if g[i][m-1]==' ':\n dfs(i,m-1)\n for i in range(m):\n if g[0][i]==' ':\n dfs(0,i)\n if g[n-1][i]==' ':\n dfs(n-1,i)\n for i in range(n):\n for j in range(m):\n if g[i][j]==' ':\n g[i][j]='X'\n return g", "def parents_to_string(parent_tuple):\n return str(parent_tuple[0])+\" \"+str(parent_tuple[1])", "def _join_mgm_lfn(self, mgm, lfn):\n if not mgm.endswith('/'): mgm += '/'\n return mgm + lfn", "def gen_str(self):\n min_edge_val = self[0][0]\n start_ix = 0\n\n for i in range(1, self.n):\n next_edge_val = self[i][0]\n if next_edge_val < min_edge_val:\n min_edge_val = next_edge_val\n start_ix = i\n\n return ''.join(str(x) for x in chain.from_iterable(\n self[(i + start_ix) % self.n] for i in range(self.n)))", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left 
and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def find_relationship(person1, person2):\n lines1 = get_ancestor_lines(person1)\n lines2 = get_ancestor_lines(person2)\n mrcas = find_most_recent(set(lines1).intersection(set(lines2)))\n\n relationships = []\n for anc in mrcas:\n relationships.append((lines1[anc], lines2[anc]))\n return relationships", "def build_dot_str(self) -> Text:\n s = []\n s.append(\"digraph {\")\n for node in self.nodes:\n label = str(node)\n if node in self.start_nodes:\n label += \"S\"\n if node in self.accept_nodes:\n label += \"A\"\n s.append(f' \"{node}\" [label=\"{label}\"];')\n s.append(\"\")\n for from_node, transitions in self.nodes.items():\n for transition, to_nodes in transitions.items():\n if not transition:\n transition = \"&epsilon;\"\n for to_node in to_nodes:\n s.append(f' \"{from_node}\" -> \"{to_node}\" [label=\"{transition}\"];')\n s.append(\"}\")\n return \"\\n\".join(s)", "def _produce_none_relations(self):\n print \"Producing NONE-relations\"\n relations = self.relations\n events = self.events\n none_relations = []\n\n for source in events:\n for target in events:\n new_relation = Relation(\"NONE\", self.text_obj, source, target, RelationType.NONE)\n print new_relation\n\n if new_relation in relations:\n continue\n else:\n none_relations.append(new_relation)\n\n self.relations = self.relations + none_relations\n\n print \"Finished producing NONE-relations\"", "def text(self):\n\n # Format is a list with each node and its parent, separated by NaN values\n return self._list_link_to_parents(col_name=\"name\")", "def create_employee_structure(employees):\n employees_dict = {}\n for employee in position_sort(employees):\n if not employee.is_secretary:\n adder(employees_dict, employee.prosecutors_office, {'employees': [], 'departments': {}, 'divisions': {}})\n if employee.prosecutors_office and employee.department and 
employee.division:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'divisions', {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office and employee.department:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'employees', [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['employees'].append(employee)\n elif employee.prosecutors_office and employee.division:\n adder(employees_dict[employee.prosecutors_office]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office:\n employees_dict[employee.prosecutors_office]['employees'].append(employee)\n return employees_dict", "def stringify(cls, relationship):\n\n\t\tif relationship == cls.STUDENT:\n\t\t\treturn \"STUDENT\"\n\t\tif relationship == cls.TEACHER:\n\t\t\treturn \"TEACHER\"\n\t\tif relationship == cls.BOTH:\n\t\t\treturn \"BOTH\"\n\t\treturn \"ERROR\"", "def test_role_assignments_directed_graph_of_implied_roles(self):\n test_plan = {\n 'entities': {'domains': {'users': 1, 'projects': 1},\n 'roles': 6},\n # Three level tree of implied roles, where one of the roles at the\n # bottom is implied by more than one top level role\n 'implied_roles': [{'role': 0, 'implied_roles': [1, 2]},\n {'role': 1, 'implied_roles': [3, 4]},\n {'role': 5, 'implied_roles': 4}],\n # The user gets both top level roles\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 5, 'project': 0}],\n 'tests': [\n # The implied roles should be expanded out and there should be\n # two entries for the role that had two different prior roles.\n {'params': {'user': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 5, 'project': 0},\n {'user': 0, 'role': 1, 'project': 0,\n 'indirect': {'role': 0}},\n {'user': 0, 'role': 2, 'project': 0,\n 'indirect': {'role': 0}},\n {'user': 0, 'role': 3, 'project': 0,\n 'indirect': {'role': 1}},\n {'user': 0, 'role': 4, 'project': 0,\n 'indirect': {'role': 1}},\n {'user': 0, 'role': 4, 'project': 0,\n 'indirect': {'role': 5}}]},\n ]\n }\n test_data = self.execute_assignment_plan(test_plan)\n\n # We should also be able to get a similar (yet summarized) answer to\n # the above by calling get_roles_for_user_and_project(), which should\n # list the role_ids, yet remove any duplicates\n role_ids = self.assignment_api.get_roles_for_user_and_project(\n test_data['users'][0]['id'], test_data['projects'][0]['id'])\n # We should see 6 entries, not 7, since role index 5 appeared twice in\n # the answer from list_role_assignments\n self.assertThat(role_ids, matchers.HasLength(6))\n for x in range(0, 5):\n self.assertIn(test_data['roles'][x]['id'], role_ids)", "def __str__(self):\n def recurse(node, level):\n s = \"\"\n if type(node) == LeafNode:\n return (\"| \" * level) + str(node) + \"\\n\"\n if node != None:\n s += recurse(node.rightOperand, level + 1)\n s += \"| \" * level\n s += str(node.operator) + \"\\n\"\n s += recurse(node.leftOperand, 
level + 1)\n return s\n return recurse(self, 0)", "def __repr__(self):\n lines = []\n nodes = [(self.root, 0)]\n while nodes:\n node, indent = nodes.pop()\n name = str(node) if node else 'None'\n lines.append(' ' * indent + name)\n if node:\n nodes.append((node.child[True], indent + 1))\n nodes.append((node.child[False], indent + 1))\n\n return os.linesep.join(lines)", "def __str__(self):\n return \"{}\\n\\n{}\".format(self.puzzle,\n \"\\n\".join([str(x) for x in self.children]))", "def as_string(self):\n\n ll_elements = []\n current = self\n while current:\n ll_elements.append(str(current.data))\n current = current.next\n\n return \"->\".join(ll_elements)", "def print_ahnentafel(start_person):\n # Note: We don't actually keep track of gender, so this basically assumes\n # that Fathers (HUSB) are always listed first in FAM records.\n todo = collections.deque()\n todo.append((1, start_person))\n while todo:\n index, person = todo.popleft()\n print index, person.name(), person.sex(), person.birthdate(), person.deathdate()\n for i, parent in enumerate(person.parents):\n todo.append((2 * index + i, parent))", "def tree_string(self, indent=0): # pragma: no cover\r\n return \"\"", "def linked_list_string(self, linked_list):\n\n if linked_list is not None:\n root_str = str(linked_list.val) + \" => \"\n next_str = self.linked_list_string(linked_list.next)\n\n return root_str + next_str\n return 'None'", "def _join_expanded_node(expanded_node):\n sorted_expanded_node = sorted(expanded_node, key=lambda x: x[0])\n return \" \".join([word for address, word in sorted_expanded_node])", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def lemma(self) -> str:", "def flattenNames(names, check=False):\n i = getIter(names)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n if isinstance(item, str):\n yield item\n elif isinstance(item, tuple) and len(item) == 2\\\n and isinstance(item[0], str) and isinstance(item[1], list):\n for c in flattenNames(item[1], check):\n if c == None:\n yield c\n else:\n yield '%s/%s' % (item[0], c)\n else:\n if check:\n yield None\n item = i.next()\n except StopIteration:\n pass", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def propagate_lineage(nodes):\n for name in nodes:\n # Called just to populate lineages, both way\n get_node_lineage(nodes, name, 'up', 'ancestors')\n get_node_lineage(nodes, name, 'down', 'descendants')", "def arrows(m) -> str:\n return m.arrow_list", "def get_relatives(\n self, reltypes=None, relfilter=None, fetch_objects=True, ignore_missing=True\n ):\n ret = defaultdict(set)\n relations = self.icalendar_component.get(\"RELATED-TO\", [])\n if not isinstance(relations, list):\n relations = [relations]\n for rel in relations:\n if relfilter and not relfilter(rel):\n continue\n reltype = rel.params.get(\"RELTYPE\", \"PARENT\")\n if reltypes and not reltype in reltypes:\n continue\n ret[reltype].add(str(rel))\n\n if fetch_objects:\n for reltype in ret:\n uids = ret[reltype]\n ret[reltype] = []\n for obj in uids:\n try:\n ret[reltype].append(self.parent.object_by_uid(obj))\n except error.NotFoundError:\n if not ignore_missing:\n raise\n return ret", "def __str__(self):\n s = \"--\\n\"\n for node in self:\n s += node.__str__() + \"\\n\"\n return s + \"--\"", "def generate_milestone_data(supervisor_employee_dict, all_employee_dict, 
run_date):\n supervisor_milestone_list = []\n for supervisor_id in supervisor_employee_dict:\n supervisor_milestone_dict = {}\n employees = supervisor_employee_dict[supervisor_id]\n employee_dict = {}\n\n milestone_counter = 0\n\n # Remove the supervisor from all the employees leaving the non-managers behind\n all_employee_dict.pop(supervisor_id, None)\n supervisor_milestone_dict['supervisor_id'] = supervisor_id\n\n for emp in employees:\n hire_date = emp.get('hire_date')\n emp_id = emp.get('employee_id')\n anv_dates = calculate_anniversary_dates(\n hire_date,\n run_date\n )\n\n # This is built to support employees that share a common milestone date\n for date in anv_dates:\n group = employee_dict.setdefault(date, [])\n group.append(emp_id)\n\n # Sort the dict by date by converting into tuple and sorting\n milestone_tuple = [(v, k) for k, v in employee_dict.iteritems()]\n sorted_ms_tup = sorted(milestone_tuple, key=itemgetter(1))\n upcoming_milestone_list = []\n\n for employee_id_list, milestone_date in sorted_ms_tup:\n for emp_id in employee_id_list:\n\n # Do not print out more than 5 milestones\n if milestone_counter == 5:\n break\n\n upcoming_milestone = {\n 'employee_id': emp_id,\n 'anniversary_date': str(milestone_date)\n }\n upcoming_milestone_list.append(upcoming_milestone)\n milestone_counter += 1\n\n supervisor_milestone_dict['upcoming_milestones'] = upcoming_milestone_list\n supervisor_milestone_list.append(supervisor_milestone_dict)\n\n return supervisor_milestone_list, all_employee_dict", "def __str__(self):\n out = [f'{v}: {self.adj_list[v]}' for v in self.adj_list]\n out = '\\n '.join(out)\n if len(out) < 70:\n out = out.replace('\\n ', ', ')\n return f'GRAPH: {{{out}}}'\n return f'GRAPH: {{\\n {out}}}'", "def _get_uml_template(*, types: dict, type_mapping: dict, message_mapping: dict) -> str:\n relationships = []\n classes = []\n\n uml_template = \"\"\"\n digraph \"Protobuf UML class diagram\" {\n fontname = \"Bitstream Vera Sans\"\n fontsize = 8\n\n node [\n fontname = \"Bitstream Vera Sans\"\n fontsize = 8\n shape = \"record\"\n style=filled\n fillcolor=gray95\n ]\n\n edge [\n fontname = \"Bitstream Vera Sans\"\n fontsize = 8\n\n ]\n\n CLASSES\n\n RELATIONSHIPS\n }\n \"\"\"\n\n entry_index = 2\n for _type, message in types.items():\n type_template_text = StringIO()\n type_template_text.write(f\"\"\" {entry_index}[label = \"{{{_type}|\"\"\")\n fields = []\n for _field in message.fields:\n message_type = _field.message_type\n field_type = type_mapping[_field.type] # this will be 'message' if referencing another protobuf message\n\n if message_type:\n this_node = message_mapping[_type]\n that_node = message_mapping[message_type.name]\n relationships.append(f\" {this_node}->{that_node}\")\n field_type = message_type.name # so we replace the 'message' token by the actual name\n\n fields.append(f\"+ {_field.name}:{field_type}\")\n\n # add fields\n type_template_text.write(\"\\\\n\".join(fields))\n type_template_text.write(\"}\\\"]\\n\")\n entry_index += 1\n classes.append(type_template_text.getvalue())\n\n type_template_text.close()\n\n uml_template = uml_template.replace(\"CLASSES\", \"\\n\".join(classes))\n uml_template = uml_template.replace(\"RELATIONSHIPS\", \"\\n\".join(relationships))\n return uml_template", "def getHierarchies():", "def getHierarchies():", "def read_nell_relations():\n\trel=os.walk(\"nell/relations\")\n\trelation=[]\n\tfor i in rel:\n\t\ttrel=i[2]\n\tfor i in trel:\n\t\trelation.append(' '.join(segment(i.split(':')[1])))\n\treturn relation", "def 
meshRelationships(Objects):\r\n # Create some variables to be used to store objects\r\n foreheadVariable = []\r\n noseBridgeVariable = []\r\n noseVariable = []\r\n eyeVariable = []\r\n mouthLoopVariable = []\r\n mouthVariable = []\r\n cheekVariable = []\r\n chinVariable = []\r\n earVariable = []\r\n backHeadVariable = []\r\n lowerBackHeadVariable = []\r\n\r\n # Create the relationshipList\r\n relationshipList = []\r\n\r\n for forehead in Objects:\r\n if \"TubxForehead_geo_\" in forehead:\r\n foreheadVariable.append(forehead)\r\n\r\n for noseBridge in Objects:\r\n if \"TubxNoseBridge_geo_\" in noseBridge:\r\n noseBridgeVariable.append(noseBridge)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, noseBridge, forehead)\r\n\r\n for eye in Objects:\r\n if \"TubxEye_geo_\" in eye:\r\n eyeVariable.append(eye)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, eye, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, eye, noseBridge)\r\n\r\n for nose in Objects:\r\n if \"TubxNose_geo_\" in nose:\r\n noseVariable.append(nose)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, nose, noseBridge)\r\n\r\n for mouthLoop in Objects:\r\n if \"TubxMouthLoop_geo_\" in mouthLoop:\r\n mouthLoopVariable.append(mouthLoop)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList, mouthLoop, nose)\r\n\r\n for mouth in Objects:\r\n if \"TubxMouth_geo_\" in mouth:\r\n mouthVariable.append(mouth)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, mouth, mouthLoop)\r\n\r\n for cheek in Objects:\r\n if \"TubxCheek_geo_\" in cheek:\r\n cheekVariable.append(cheek)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, cheek, mouthLoop)\r\n\r\n for chin in Objects:\r\n if \"TubxChin_geo_\" in chin:\r\n chinVariable.append(chin)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, chin, mouthLoop)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, chin, cheek)\r\n\r\n for ear in Objects:\r\n if \"TubxEar_geo_\" in ear:\r\n earVariable.append(ear)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, ear, forehead)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, ear, cheek)\r\n\r\n for backhead in Objects:\r\n if \"TubxBackHead_geo_\" in backhead:\r\n backHeadVariable.append(backhead)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, backhead, forehead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, backhead, ear)\r\n\r\n for lowerbackhead in Objects:\r\n if \"TubxLowerBackHead_geo_\" in lowerbackhead:\r\n lowerBackHeadVariable.append(lowerbackhead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, lowerbackhead, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, lowerbackhead, backhead)\r\n\r\n for default in Objects:\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, default, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, default, noseBridge)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList,default,nose)\r\n for eye in eyeVariable:\r\n createRelationships(relationshipList, default, eye)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, default, mouthLoop)\r\n for mouth in 
mouthVariable:\r\n createRelationships(relationshipList, default, mouth)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, default, cheek)\r\n for chin in chinVariable:\r\n createRelationships(relationshipList, default, chin)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, default, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, default, backhead)\r\n for lowerbackhead in lowerBackHeadVariable:\r\n createRelationships(relationshipList, default, lowerbackhead)\r\n\r\n return relationshipList", "def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)", "def hierarchy_name(self, adjust_for_printing=True):\n if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)\n else: adjust = lambda x: x\n if self.has_parent():\n return self._parent_.hierarchy_name() + \".\" + adjust(self.name)\n return adjust(self.name)" ]
[ "0.5634923", "0.53542036", "0.5284192", "0.5087596", "0.49705473", "0.4939793", "0.49350137", "0.48989594", "0.48973182", "0.48740613", "0.485191", "0.48157832", "0.47859207", "0.47704506", "0.47661456", "0.4745172", "0.47298232", "0.47289705", "0.4718572", "0.46928048", "0.4681505", "0.4680061", "0.46481207", "0.46174118", "0.45920128", "0.45801604", "0.45766482", "0.45643035", "0.45582384", "0.45357338", "0.45355636", "0.4531239", "0.45252168", "0.45124394", "0.44928846", "0.44886032", "0.4480552", "0.4477549", "0.44728994", "0.44633192", "0.44563386", "0.4442862", "0.44395444", "0.44332868", "0.44328684", "0.4432748", "0.44285583", "0.44238544", "0.44173244", "0.4413833", "0.44114515", "0.44046423", "0.43863153", "0.43848935", "0.4378083", "0.43599468", "0.4349559", "0.434426", "0.43413857", "0.43407878", "0.43336862", "0.43314472", "0.4330891", "0.4328735", "0.4327216", "0.43232557", "0.43222278", "0.4320857", "0.43204728", "0.43196926", "0.43133628", "0.43109602", "0.43108168", "0.4309664", "0.4293513", "0.42912504", "0.42900255", "0.4288948", "0.42874268", "0.4285579", "0.42798993", "0.42781654", "0.42772368", "0.42740017", "0.4267521", "0.4266127", "0.42602804", "0.42549324", "0.4254839", "0.4254519", "0.42465276", "0.42459542", "0.4240967", "0.42347002", "0.4231264", "0.4231264", "0.42278638", "0.42161143", "0.42110685", "0.4210861" ]
0.6343025
0
r""" 1d nonlinear elasticity riemann solver aux is expected to contain aux[i,0] density in cell i aux[i,1] bulk modulus in cell i
def rp_nel_1d(q_l, q_r, aux_l, aux_r, aux_global): meqn = 2 mwaves = 2 # Convenience nrp = np.size(q_l, 0) # Set up arrays for return values fwave = np.empty((nrp, meqn, mwaves)) s = np.empty((nrp, mwaves)) amdq = np.empty((nrp, meqn)) apdq = np.empty((nrp, meqn)) # Linearized bulk modulus, sound speed, and impedance: bulkl = sigmap(q_l[:, 0], aux_l[:, 1]) bulkr = sigmap(q_r[:, 0], aux_r[:, 1]) cl = np.sqrt(bulkl / aux_l[:, 0]) cr = np.sqrt(bulkr / aux_r[:, 0]) zl = cl * aux_l[:, 0] zr = cr * aux_r[:, 0] # Jumps: du = q_r[:, 1] / aux_r[:, 0] - q_l[:, 1] / aux_l[:, 0] dsig = sigma(q_r[:, 0], aux_r[:, 1]) - sigma(q_l[:, 0], aux_l[:, 1]) b1 = -(zr * du + dsig) / (zr + zl) b2 = -(zl * du - dsig) / (zr + zl) # Compute the f-waves # 1-Wave fwave[:, 0, 0] = b1 fwave[:, 1, 0] = b1 * zl s[:, 0] = -cl # 2-Wave fwave[:, 0, 1] = b2 fwave[:, 1, 1] = b2 * (-zr) s[:, 1] = cr # Compute the left going and right going fluctuations for m in range(meqn): amdq[:, m] = fwave[:, m, 0] apdq[:, m] = fwave[:, m, 1] return fwave, s, amdq, apdq
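A minimal driver sketch for the rp_nel_1d document above (illustration only, not one of the dataset fields). The solver calls two helpers, sigma and sigmap, that it does not define; the exponential stress law below is an assumption, chosen because Clawpack's nonlinear elasticity examples use a law of this form, and the array shapes follow the docstring's aux layout.

import numpy as np

# Assumed constitutive law: stress and its strain derivative.
# rp_nel_1d (defined in the document field above) expects both in scope.
def sigma(eps, K):
    return np.exp(K * eps) - 1.0

def sigmap(eps, K):
    return K * np.exp(K * eps)

# Two interfaces: q[:, 0] = strain, q[:, 1] = momentum;
# aux[:, 0] = density, aux[:, 1] = bulk modulus (per the docstring).
q_l = np.array([[0.01, 0.10], [0.02, 0.20]])
q_r = np.array([[0.02, 0.20], [0.01, 0.10]])
aux_l = np.ones((2, 2))
aux_r = np.ones((2, 2))

fwave, s, amdq, apdq = rp_nel_1d(q_l, q_r, aux_l, aux_r, aux_global=None)
print(s)            # column 0: left-going speeds (< 0), column 1: right-going (> 0)
print(amdq + apdq)  # equals f(q_r) - f(q_l) = (-du, -dsigma) for an f-wave solver

The identity amdq + apdq == f(q_r) - f(q_l) holds for any consistent f-wave decomposition, so the second print is a quick sanity check on the solver.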
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, cell):\n self._cell = cell\n self._residual_fn = gnmt_residual_fn", "def auxmin_f1_part_i(x,m_ind):\n \n tmp1 = 2.0*auxminrho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1] \n tmp2 = 2.0*auxminrho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1]\n\n # checking maximum used in auxminrho1 \n if (tmp1 > tmp2):\n f = tmp1\n cfg.alpha1[m_ind] = 1 # alpha1 should be ok here. We do not solve aux and real problem at the same time. \n elif (tmp1 < tmp2):\n f = tmp2\n cfg.alpha1[m_ind] = 0 \n else:\n f = tmp2\n cfg.alpha1[m_ind] = 2 \n\n return f", "def auxminrho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= auxmin_cc_piece(x,k_ind,m_ind) \n\n return f", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k )\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\t\t\t#interp_model[interp_model == 0] = np.nan\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n 
for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in 
np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # 
indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) 
)\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model[interp_model == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] 
\n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def __init__(self, ni, nj, nk):\n \n self.ni = ni\n self.nj = nj\n self.nk = nk\n \n self.nn = np.zeros(3)\n \n self.nn[0] = self.ni\n self.nn[1] = self.nj\n self.nn[2] = self.nk\n \n self.x0 = np.zeros(3)\n self.dh = np.zeros(3)\n self.xm = np.zeros(3)\n self.xc = np.zeros(3)\n \n self.EPS_0 = 8.85418782e-12\n self.QE = 1.602176565e-19;\n self.AMU = 1.660538921e-27\n self.ME = 9.10938215e-31;\n self.K = 1.380648e-23;\n self.EvToK = self.QE/self.K;\n \n self.phi = np.zeros((self.ni, self.nj, self.nk))\n self.phi_new = np.zeros((self.ni, self.nj, self.nk))\n self.R = np.zeros((self.ni, self.nj, self.nk))\n self.rho = np.zeros((self.ni, self.nj, self.nk))\n self.node_vol = np.zeros((self.ni, self.nj, self.nk))\n self.ef = np.zeros((self.ni, self.nj, self.nk, 3))", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] 
- \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def interior_element(self):\n\n temp = 0\n for j in range(3,2*self.Ne-2,2):\n\n L = (j-1)*self.h\n R = (j+1)*self.h\n\n psi_jm1 = (self.x-self.h*j)*(self.x-self.h*j-self.h)/(2*self.h**2)\n psi_j= -(self.x-self.h*j-self.h)*(self.x-self.h*j+self.h)/(self.h**2)\n psi_jp1 = (self.x-self.h*j)*(self.x-self.h*j+self.h)/(2*self.h**2)\n\n self.psi[j + temp] = psi_jm1\n self.psi[j + temp + 1] = psi_j\n self.psi[j + temp + 2] = psi_jp1\n\n d_psi_jm1 = sym.diff(psi_jm1,self.x)\n d_psi_j = sym.diff(psi_j,self.x)\n d_psi_jp1 = sym.diff(psi_jp1,self.x)\n\n psi_jj = d_psi_j*d_psi_j\n psi_jm1jm1 = d_psi_jm1*d_psi_jm1\n psi_jp1jp1 = d_psi_jp1*d_psi_jp1\n psi_jjm1 = d_psi_j*d_psi_jm1\n psi_jjp1 = d_psi_j*d_psi_jp1\n psi_jm1jp1 = d_psi_jm1*d_psi_jp1\n\n A_jj = sym.integrate(psi_jj, (self.x, L, R))\n A_jm1jm1 = sym.integrate(psi_jm1jm1, (self.x, L, R))\n A_jp1jp1 = sym.integrate(psi_jp1jp1, (self.x, L, R))\n A_jjm1 = sym.integrate(psi_jjm1, (self.x, L, R))\n A_jjp1 = sym.integrate(psi_jjp1, (self.x, L, R))\n A_jm1jp1 = sym.integrate(psi_jm1jp1, (self.x, L, R))\n\n rhs_jm1 = sym.integrate(self.f(self.x)*psi_jm1,(self.x,L,R))\n rhs_j = sym.integrate(self.f(self.x)*psi_j,(self.x,L,R))\n rhs_jp1 = sym.integrate(self.f(self.x)*psi_jp1,(self.x,L,R))\n\n a1 = [A_jm1jm1,A_jjm1,A_jm1jp1]\n a2 = [A_jjm1, A_jj, A_jjp1]\n a3 = [A_jm1jp1, A_jjp1, A_jp1jp1]\n\n A = np.array([a1, a2, a3]).reshape(3,3)\n b = np.array([rhs_jm1, rhs_j, rhs_jp1])\n\n for i in range(j-1,j+2):\n self.global_vector[i] += b[i-(j-1)]\n for k in range(j-1,j+2):\n self.global_matrix[i,k] += A[i-(j-1),k-(j-1)]\n\n temp += 1", "def calculateElementResidual(self):\n import pdb\n\n for ci in range(self.nc):\n self.elementResidual[ci].fill(0.0)\n #\n self.ellamDiscretization.updateElementResidual(self.elementResidual)", "def compute(self): \n Ex=np.zeros((self.nx,self.ny+1))\n Ey=np.zeros((self.nx+1,self.ny))\n Hz=np.zeros((self.nx,self.ny))\n Hzx=np.zeros((self.nx,self.ny))\n 
Hzy=np.zeros((self.nx,self.ny))\n \n imx = []\n #eps, mu = self.makeenv()\n mu=np.ones((self.nx,self.ny))*const.mu_0\n eps = self.luneberg(int(self.nx/2), int(self.ny*2/3), self.R)\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n\n c = self.dt/(eps*self.ds)\n d = self.dt/(mu* self.ds)\n \n sigma = self.pml(eps, mu, 20)\n cax = 1 - (sigma[0] * self.dt / eps)\n cay = 1 - (sigma[1] * self.dt / eps)\n dax = 1 - (sigma[2] * self.dt / mu) \n day = 1 - (sigma[3] * self.dt / mu)\n \n bar = progressbar.ProgressBar()\n for n in bar(range(self.nt+1)):\n Ex[:,1:-1] = (cay[:,1:]+cay[:,:-1])/2*Ex[:,1:-1] + (c[:,1:]+c[:,:-1])/2*(Hz[:,1:]-Hz[:,:-1])\n Ey[1:-1,:] = (cax[1:,:]+cax[:-1,:])/2*Ey[1:-1,:] - (c[1:,:]+c[:-1,:])/2*(Hz[1:,:]-Hz[:-1,:])\n \n Hzx = dax*Hzx - d*(Ey[1:,:] - Ey[:-1,:])\n Hzy = day*Hzy + d*(Ex[:,1:] - Ex[:,:-1]) \n Hz = Hzx + Hzy + self.actualsource(self.source, self.f, n, self.dt) \n \n if(n%self.interval == 0): imx.append(Ex[:self.nx,:self.ny]**2 + Ey[:self.nx, :self.ny]**2)\n\n return imx", "def _step_EM(\n self, X, indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2, n1, n2\n ):\n\n eps_1 = max(1e-4 / n1, 1e-9)\n eps_2 = max(1e-4 / n2, 1e-9)\n nq, nl = self.n_row_clusters, self.n_column_clusters\n\n ########################## E-step ##########################\n u = X.dot(tau_2) # Shape is (n1,nl)\n v = X.T.dot(tau_1) # Shape is (n2,nq)\n\n # Update of tau_1 with sparsity trick.\n l_tau_1 = (\n (\n (u.reshape(n1, 1, nl))\n * (self._np.log(pi) - self._np.log(1 - pi)).reshape(1, nq, nl)\n ).sum(2)\n + self._np.log(alpha_1.reshape(1, nq))\n + (self._np.log(1 - pi) @ tau_2.T).sum(1)\n )\n\n # For computationnal stability reasons 1.\n l_tau_1 -= l_tau_1.max(axis=1).reshape(n1, 1)\n tau_1 = self._np.exp(l_tau_1)\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Normalize.\n\n # For computationnal stability reasons 2.\n tau_1[tau_1 < eps_1] = eps_1\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Re-Normalize.\n\n # Update of tau_2 with sparsity trick.\n l_tau_2 = (\n (\n (v.reshape(n2, nq, 1))\n * (self._np.log(pi) - self._np.log(1 - pi)).reshape(1, nq, nl)\n ).sum(1)\n + self._np.log(alpha_2.reshape(1, nl))\n + (tau_1 @ self._np.log(1 - pi)).sum(0)\n )\n\n # For computationnal stability reasons 1.\n l_tau_2 -= l_tau_2.max(axis=1).reshape(n2, 1)\n tau_2 = self._np.exp(l_tau_2)\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Normalize.\n\n # For computationnal stability reasons 2.\n tau_2[tau_2 < eps_2] = eps_2\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Re-Normalize.\n ########################## M-step ##########################\n alpha_1 = tau_1.mean(0)\n alpha_2 = tau_2.mean(0)\n pi = (\n tau_1[indices_ones[0]].reshape(-1, nq, 1)\n * tau_2[indices_ones[1]].reshape(-1, 1, nl)\n ).sum(0) / (tau_1.sum(0).reshape(nq, 1) * tau_2.sum(0).reshape(1, nl))\n return pi, alpha_1, alpha_2, tau_1, tau_2", "def term_1(\n omega1, # vorticity-1\n omega2, # vorticity-2\n omega3, # vorticity-3\n enst, # enstrophy\n nu_sgs, # turbulent viscosity\n h = True): # spatial step size\n #---------------------------------------------------------------------#\n # Setting default values #\n #---------------------------------------------------------------------#\n if h is True:\n h = 2.0*np.pi/64.0\n #---------------------------------------------------------------------#\n # Preallocating space #\n 
#---------------------------------------------------------------------#\n term = np.zeros((64,64,64))\n #---------------------------------------------------------------------#\n # Enstrophy term #\n #---------------------------------------------------------------------#\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[2], h, edge_order=2)[2]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[1], h, edge_order=2)[1]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[0], h, edge_order=2)[0]\n #---------------------------------------------------------------------#\n # Dissipation #\n #---------------------------------------------------------------------#\n omega1_grad = np.gradient(omega1, h, edge_order=2)\n omega2_grad = np.gradient(omega2, h, edge_order=2)\n omega3_grad = np.gradient(omega3, h, edge_order=2)\n term -= np.square(omega1_grad[2])\n term -= np.square(omega1_grad[1])\n term -= np.square(omega1_grad[0])\n term -= np.square(omega2_grad[2])\n term -= np.square(omega2_grad[1])\n term -= np.square(omega2_grad[0])\n term -= np.square(omega3_grad[2])\n term -= np.square(omega3_grad[1])\n term -= np.square(omega3_grad[0])\n #---------------------------------------------------------------------#\n # Applying the subgrid stress #\n #---------------------------------------------------------------------#\n term *= nu_sgs\n\n return term", "def nzErr(xerr, yerr, vxerr, vyerr, year_x, year_y, mag, alnDir = '13_08_21/', chainsDir = 'efit/chains_S0-2_newRV2/'):\n\n #Read in values for error in position and velocity of sgr*\n origin_val = asciidata.open('/g/ghez/align/' + alnDir + chainsDir + 'efit_summary.txt')\n ori_x0e = origin_val[25][0]\n ori_y0e = origin_val[26][0]\n ori_vxe = origin_val[27][0]\n ori_vye = origin_val[28][0]\n t_0 = 2000.0 #hard coded t_0 of sgr*\n\n # magBins=np.array([9,11,12,13,14,15,16,17,18,19,20,21])\n # deltaArr=np.array([3.5,71.0,58.0,210.0,300.0,650.0,700.0,1100.0,1900.0,2200.0,3000.0])*1e-6\n\n# delta = mag*0.0\n# for i in range(len(mag)):\n# for j in range(len(deltaArr)):\n# if ((mag[i] > magBins[j]) & (mag[i] <= magBins[j+1])):\n# delta[i]=deltaArr[j]\n\n#pdb.set_trace()\n\n #Update errors\n xerr = np.sqrt(xerr**2 + ori_x0e**2 + ((year_x - t_0)*ori_vxe)**2)\n yerr = np.sqrt(yerr**2 + ori_y0e**2 + ((year_y - t_0)*ori_vye)**2)\n vxerr = np.sqrt(vxerr**2 + ori_vxe**2)\n vyerr = np.sqrt(vyerr**2 + ori_vye**2)\n\n return xerr, yerr, vxerr, vyerr", "def nonlinear_electroelastodynamics(optimise=True):\n\n mesh = Mesh()\n mesh.Parallelepiped(upper_right_front_point=(1,1,0.001),nx=10,ny=10,nz=1, element_type=\"hex\")\n\n mu = 5.0e4\n mu1 = mu\n mu2 = mu\n eps_2 = 4.0*8.8541e-12\n v = 0.4\n lamb = 2.*mu*v/(1-2.*v)\n material = IsotropicElectroMechanics_108(3, mu1=mu1, mu2=mu2, lamb=lamb, eps_2=eps_2, rho=1200.)\n\n formulation = DisplacementPotentialFormulation(mesh)\n\n\n def dirichlet_function(mesh):\n\n boundary_data = np.zeros((mesh.points.shape[0],4))+np.NAN\n\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],0.)\n boundary_data[Z_0,3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],.001)\n 
boundary_data[Z_0,3] = 9e3\n\n return boundary_data\n\n boundary_condition = BoundaryCondition()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n nonlinear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=25,\n analysis_nature=\"nonlinear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n nonlinear_static_results = nonlinear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n nonlinear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"nonlinear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n compute_energy_dissipation=True,\n compute_linear_momentum_dissipation=True,\n )\n\n nonlinear_dynamic_results = nonlinear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n # boundary_condition.__reset_state__()\n # boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n # nonlinear_dynamic_solver_exp = FEMSolver(total_time=6.,\n # number_of_load_increments=200000,\n # save_frequency=200000,\n # analysis_nature=\"nonlinear\",\n # analysis_type=\"dynamic\",\n # analysis_subtype=\"explicit\",\n # newton_raphson_tolerance=1e-5,\n # newton_raphson_solution_tolerance=1e-11,\n # optimise=optimise,\n # print_incremental_log=True,\n # )\n\n # nonlinear_dynamic_results_exp = nonlinear_dynamic_solver_exp.Solve(formulation=formulation, mesh=mesh,\n # material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n linear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"linear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n linear_static_results = linear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n linear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=1000,\n analysis_nature=\"linear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n break_at_increment=100,\n )\n\n linear_dynamic_results = linear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n s1 = nonlinear_static_results.GetSolutionVectors()\n s2 = nonlinear_dynamic_results.GetSolutionVectors()\n # s3 = nonlinear_dynamic_results_exp.GetSolutionVectors()\n s4 = linear_static_results.GetSolutionVectors()\n s5 = linear_dynamic_results.GetSolutionVectors()\n\n norm = lambda x: np.linalg.norm(x[:,2,-1])\n assert norm(s1) > 0.13 and norm(s1) < 0.15\n assert norm(s2) > 0.13 and norm(s2) < 0.15\n assert norm(s4) > 0.13 and norm(s4) < 0.15", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = 
self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. 
div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + 
v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) 
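# Beta: (scaled) conduction-to-convection ratio k/h; Epsilon below: the thermal diffusivity k/(rho*c_p) multiplied by the time step d_T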
\r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)", "def OIT_solver_fisher_rao(I0, I1, niter, eps, lamb, inverse_inertia_op,\n callback=None):\n # Get the space of I0\n domain = I0.space\n \n # Initialize the determinant of Jacobian of inverse deformation\n DPhiJacobian = domain.one()\n\n # Create gradient operator and divergence operator\n grad_op = Gradient(domain, method='forward', pad_mode='symmetric')\n div_op = - grad_op.adjoint\n \n # Create the temporary elements for update\n v = grad_op.range.element()\n\n # Initialize the non-mass-preserving deformed template\n non_mp_deform_I0 = I0\n \n inv_inertia_op = inverse_inertia_op\n\n # Store energy\n E = []\n kE = len(E)\n E = np.hstack((E, np.zeros(niter)))\n \n# print('Chong Chen')\n\n # Begin iteration\n for k in range(niter):\n # Compute the energy of the regularization term\n E[k+kE] = np.asarray(lamb * (np.sqrt(DPhiJacobian) - 1) ** 2).sum()\n\n # Implementation for mass-preserving case\n PhiStarI0 = DPhiJacobian * non_mp_deform_I0\n\n # Show intermediate result\n if callback is not None:\n callback(PhiStarI0)\n\n # For Fisher-Rao distance\n sqrt_mp_I0 = np.sqrt(PhiStarI0)\n sqrt_I1 = np.sqrt(I1)\n grad_sqrt_mp_I0 = grad_op(sqrt_mp_I0)\n grad_sqrt_I1 = grad_op(sqrt_I1)\n \n # Compute the energy of the data fitting term \n E[k+kE] += np.asarray((sqrt_mp_I0 - sqrt_I1)**2).sum()\n\n # Compute the L2 gradient of the data fitting term\n grad_fitting = grad_op.range.zero()\n for i in range(grad_op.range.size):\n grad_fitting[i] = sqrt_I1 * grad_sqrt_mp_I0[i] - \\\n sqrt_mp_I0 * grad_sqrt_I1[i]\n \n # Compute the minus L2 gradient\n u = - lamb * grad_op(np.sqrt(DPhiJacobian)) - grad_fitting\n\n # Compute inverse inertia\n v = inv_inertia_op(u)\n\n # Update the non-mass-preserving deformed template\n non_mp_deform_I0 = domain.element(\n _linear_deform(non_mp_deform_I0, - eps * v))\n\n # Implementation for updating Jacobian determinant\n DPhiJacobian = (1.0 - eps * div_op(v)) * domain.element(\n _linear_deform(DPhiJacobian, - eps * v))\n \n return PhiStarI0, E", "def potentialSolver(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # solve potential\n for it in np.arange(1,max_it+1):\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n 
dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n\n R = -self.phi[i][j][k]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1])\n\n sum += R*R;\n\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n \n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def auxmax_f1_part_i(x,m_ind):\n \n tmp1 = 2.0*auxmaxrho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1] \n tmp2 = 2.0*auxmaxrho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1]\n\n # checking the maximum used in auxmaxrho1 \n if (tmp1 > tmp2):\n f = tmp1\n cfg.alpha1[m_ind] = 1 # alpha1 should be ok here. We do not solve aux and real problems at the same time. \n elif (tmp1 < tmp2):\n f = tmp2\n cfg.alpha1[m_ind] = 0 \n else:\n f = tmp2\n cfg.alpha1[m_ind] = 2 \n\n return f", "def meanAdjustELE(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n\n Neq = np.eye(numZD,dtype=float) * 0.01\n Apart = np.zeros((numd,numZD))\n sd = np.zeros(numd)\n\n for i in range(0,numd):\n iz = np.floor(data[i,2]/zenSpacing)\n sd[i] = np.sin(data[i,2]/180.*np.pi)\n Apart[i,iz] = 1.#-(data[i,2]-iz*zenSpacing)/zenSpacing)\n\n prechi = np.dot(data[:,3].T,data[:,3])\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,data[:,3])\n Cov = np.linalg.pinv(Neq)\n \n Sol = np.dot(Cov,Bvec)\n \n postchi = prechi - np.dot(Bvec.T,Sol)\n \n pwl = Sol\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n model = np.dot(Apart,Sol)\n f = loglikelihood(data[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n stats['bic'] = bic\n\n return pwl,pwlsig,stats", "def r_ne(RR, AA, q, Yd, Ydd, Fe, Te, Conn, Prop):\n SS, SE, j_type = Conn[0], Conn[1], Conn[3]\n mass, inertia, cc, ce, Qe, gravity = \\\n Prop[0], Prop[1], Prop[2], Prop[3], Prop[5], Prop[6]\n qd = Yd[6:]\n\n num_j = len(q) # Number of joints/links\n num_b = num_j + 1 # Number of bodies\n num_e = SE.shape[1] # Number of endpoints\n\n # Linear and angular velocities of all bodies\n vv, ww = calc_vel(AA, q, Yd, Conn, Prop)\n\n # Linear and angular accelerations of all bodies\n vd, wd = calc_acc(AA, ww, q, qd, Ydd, Conn, Prop)\n\n # Inertial forces and moments on the body centroids (for convenience the\n # the gravitational force is also included here) - eqs. 3.30-3.31\n F_in = np.zeros((3, num_b))\n T_in = np.zeros((3, num_b))\n for i in range(num_b):\n\n A_I_i = AA[:, 3*i:3*(i+1)]\n In_I_i = A_I_i @ inertia[:, 3*i:3*(i+1)] @ A_I_i.T\n\n # Eq. 
3.30 and 3.31\n F_in[:, i] = mass[i] * (vd[:, i] - gravity)\n T_in[:, i] = In_I_i @ wd[:, i] + cross(ww[:, i], (In_I_i @ ww[:, i]))\n\n # Forces and moments on the joints (eqs. 3.32-3.35)\n F_jnt = np.zeros((3, num_j))\n T_jnt = np.zeros((3, num_j))\n\n # Start from the last link\n for i in range(num_j, 0, -1):\n\n idxi = i - 1 # Index joint <i> in Fjnt, Tjnt, j_type, q\n A_I_i = AA[:, 3*i:3*(i+1)]\n is_P = float(j_type[idxi] == 'P') # = 1 if joint is prismatic\n\n # Vector centroid <i> to joint <i>\n L_ii = cc[:, i, i] - is_P * Ez * q[idxi]\n\n # Add inertial force and moment on the link centroid (the cross-product\n # is negative because the arm should go from the joint to the centroid)\n F_jnt[:, idxi] = F_in[:, i]\n T_jnt[:, idxi] = T_in[:, i] - cross(A_I_i @ L_ii, F_in[:, i])\n\n # Add force and moment due to connected upper links (note that a link\n # may have more than one upper connection)\n for j in range(i+1, num_j+1):\n\n idxj = j - 1 # Index joint <j> in j_type, q, F_jnt, T_jnt\n\n # Add contribution if link <j> is an upper connection of link <i>\n if (SS[i, j]):\n\n # Vector joint <i> to joint <j>\n L_ij = cc[:, i, j] - cc[:, i, i] + is_P * Ez * q[idxi]\n\n # Add joint force and moment (note that Fj and Tj are calculated\n # as joint force and moment on the j-th link, thus the force and\n # moment passed to the i-th link are equal and opposite)\n F_jnt[:, idxi] += F_jnt[:, idxj]\n T_jnt[:, idxi] += cross(A_I_i @ L_ij, F_jnt[:, idxj]) \\\n + T_jnt[:, idxj]\n\n # Add external forces and moments\n for ie in range(num_e):\n\n # Add contribution if endpoint <ie> is connected to link <i>\n if (SE[0, ie] == i):\n\n # Vector centroid <i> to endpoint <ie>\n A_i_ie = rpy2dc(Qe[:, ie]).T # Endpoint to link/joint\n A_I_ie = A_I_i @ A_i_ie # Endpoint to inertial\n\n # If the external load is given wrt the local frame\n if (SE[1, ie] == 0):\n L_i_ie = A_i_ie.T @ (ce[:, ie] - cc[:, i, i]\n + is_P * Ez * q[idxi])\n F_jnt[:, idxi] -= A_I_ie @ Fe[:, ie]\n T_jnt[:, idxi] -= A_I_ie @ (tilde(L_i_ie) @ Fe[:, ie]\n + Te[:, ie])\n\n # If the external load is given wrt the inertial frame\n else:\n L_i_ie = A_I_ie @ (ce[:, ie] - cc[:, i, i]\n + is_P * Ez * q[idxi])\n F_jnt[:, idxi] -= Fe[:, ie]\n T_jnt[:, idxi] -= (tilde(L_i_ie) @ Fe[:, ie] + Te[:, ie])\n\n # Reaction force and moment on the base centroid (Eqs. 3.38 and 3.39)\n F0 = np.zeros(3)\n T0 = np.zeros(3)\n\n # Add inertial force and moment of the base\n F0 += F_in[:, 0]\n T0 += T_in[:, 0]\n\n # Add forces and moments from the links connected to the base\n for i in range(1, num_j+1):\n\n # Add if link <i> is connected\n if (SS[0, i] > 0):\n idxi = i - 1 # Index link/joint <i> in Fjnt, Tjnt\n F0 += F_jnt[:, idxi]\n T0 += cross(AA[:, 0:3] @ cc[:, 0, i], F_jnt[:, idxi]) \\\n + T_jnt[:, idxi]\n\n # Add external forces and moments\n for ie in range(num_e):\n\n # Add contribution if endpoint <ie> is connected to the base\n if (SE[0, ie] == 0):\n\n A_0_ie = rpy2dc(Qe[:, ie]).T # Endpoint to base\n A_I_ie = AA[:, 0:3] @ A_0_ie # Endpoint to inertial\n\n # If the external load is given wrt the local frame\n if (SE[1, ie] == 0):\n R_0_ie = A_0_ie.T @ ce[:, ie]\n F0 -= A_I_ie @ Fe[:, ie]\n T0 -= A_I_ie @ (tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie])\n\n # If the external load is given wrt the inertial frame\n else:\n R_0_ie = A_0_ie @ ce[:, ie]\n F0 -= Fe[:, ie]\n T0 -= (tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie])\n\n # Calculation of joint torques/forces (eq. 
3.36 and 3.37)\n tau = np.zeros(num_j)\n for i in range(1, num_j+1):\n\n idxi = i - 1 # Index link/joint <i> in Fjnt, Tjnt, j_type, tau\n Ez_I_i = AA[:, 3*i:3*(i+1)] @ Ez\n\n # If revolute joint (eq. 3.36)\n if (j_type[idxi] == 'R'):\n tau[idxi] = T_jnt[:, idxi] @ Ez_I_i\n\n # If prismatic joint (eq. 3.37)\n elif (j_type[idxi] == 'P'):\n tau[idxi] = F_jnt[:, idxi] @ Ez_I_i\n\n # Compose generalized forces\n Force = np.block([F0, T0, tau])\n\n return Force", "def box_collision_info(self):\r\n position = np.zeros((self.Npart,3)) # antall part, dim, iterasjoner\r\n position[:,:] = np.random.uniform(0,1e-6, size = (self.Npart,3))\r\n velocity = np.zeros((self.Npart,3))\r\n velocity[:,:] = np.random.normal(0,self.sigma,size = (self.Npart,3))\r\n\r\n part_collided = 0\r\n part_escaped = 0\r\n momentum = 0\r\n\r\n print 'engine started'\r\n for i in xrange(1,self.n):\r\n #collision\r\n position += velocity*dt\r\n l_hole = position[:,0:2] > self.L/4\r\n h_hole = position[:,0:2] < (3*self.L)/4\r\n pos_xy = np.logical_and(l_hole, h_hole)\r\n pos_xy = np.logical_and(pos_xy[:,0], pos_xy[:,1])\r\n pos_z = position[:,2] < 0\r\n esc_part = np.logical_and(pos_z, pos_xy)\r\n\r\n #velocity[esc_part] = velocity[esc_part]\r\n part_escaped += np.sum(esc_part)\r\n\r\n for j in xrange(0,3):\r\n impact_wall_pos = np.logical_and(position[:,j] > 0,\r\n position[:,j] < self.L)\r\n velocity[np.logical_not(impact_wall_pos),j] = -velocity[\r\n np.logical_not(impact_wall_pos),j]\r\n\r\n\r\n if j == 0:\r\n part_collided += np.sum(np.logical_not(impact_wall_pos),j)\r\n momentum += np.sum(2*self.m*abs(velocity[np.logical_not(\r\n impact_wall_pos),j]))\r\n\r\n\r\n\r\n position[position < 0] = 0\r\n position[position >self.L] = self.L\r\n\r\n particle_collided = part_collided/2\r\n return position, velocity,part_escaped, impact_wall_pos, particle_collided, momentum", "def potentialSolver2(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in 
np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def fv(X,Y,dx,dy,r2,i,append,L,N,U,dt,close_list,Nlist,vel_verlet_on,R,menu,submenu,n1,grid,G,wallcount,X2):\r\n\r\n \"\"\"JV: append is a boolean. If it's true, adds the energy to our list, if it isn't, it doesn't.\r\n We do that because in some cases we will call the algorithm more times than the actual step number (and\r\n we only want to sum the value T/dt times), this is needed in the velocity-Verlet algorithm, that we call the fv()\r\n function one more time than needed just to start the loop.\"\"\"\r\n\r\n# L = self.param[2]\r\n#\r\n# N = self.particles.size\r\n\r\n #For computing all the distances I use a trick with the meshgrid function,\r\n #see the documentation on how this works if you dont see it.\r\n\r\n \"\"\"JV: X is an array that contains each position, mx is an nxn array that each column is the position of one particle (so it's a matrix\r\n that has n X rows) and mxt is the same but tranposed (so it's a matrix of n X columns)\"\"\"\r\n\r\n \"\"\"\r\n UPDATE: This block of code is commented because now it's done in a loop inside solve_verlet() (due to Numba...).\r\n Looks a little bit messy but if Numba allowed me to call the np.meshgrid() function we would do this here. 
Sorry, but I like to keep the comment to remind me that.\r\n \"\"\"\r\n # MX, MXT = np.meshgrid(X,X,copy=False)\r\n # MY, MYT = np.meshgrid(Y,Y,copy=False)\r\n\r\n #JV: So dx is an nxn antisymmetric array with 0 in the diagonal, and each position is the corresponding separation between the particles,\r\n # so the position [1,2] is the separation between particle 1 and 2 (x1-x2), and so on\r\n # dx = MXT - MX\r\n # dx = dx\r\n\r\n # dy = MYT - MY\r\n # dy = dy\r\n\r\n # r2 = np.square(dx)+np.square(dy)\r\n\r\n # if(menu == \"Free!\"):\r\n # #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n # dx_v2 = (np.abs(dx.copy())-1*L)\r\n # r2_v2 = dx_v2**2+dy**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # dy_v2 = (np.abs(dy.copy())-1*L)\r\n # r2_v2 = dx**2+dy_v2**2\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # r2_v2 = dx_v2**2+dy_v2**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n dUx = 0.\r\n dUy = 0.\r\n utot = np.zeros((N))\r\n f = np.zeros((N,2))\r\n\r\n for j in range(0,N):\r\n dUx = 0.\r\n dUy = 0.\r\n u = 0.\r\n\r\n #JV: we now calculate the force with only the Nlist closest particles\r\n for k in range(0,Nlist):\r\n c = int(close_list[j][k])\r\n\r\n #In the force computation we include the LJ and the walls (JV: in the verlet case). I truncate the interaction at self.R units of length,\r\n #I also avoid distances close to 0 (which should only affect the diagonal in the matrix of distances)\r\n #All these conditions are included using the numpy.where function.\r\n #If you want to include more forces you only need to add terms to these lines.\r\n\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c])\r\n dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c])\r\n # print(dUx,dUy,dx[j,c],r2[j,c],R[j],R[c])\r\n#JV: COMMENTED PART BECAUSE NUMBA HAS PROBLEMS WITH THIS BLOCK OF CODE THAT DOES THE CALCULATION IN THE VERLET ALGORITHM, NOW IT ONLY WORKS WITH THE VELOCITY VERLET, TO FIX\"\r\n# else:\r\n# if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n# dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n\r\n #JV: We add the energy in the corresponding array in both cases, remember that the verlet algorithm will include the energy from the walls\r\n # and that will be visible in fluctuations on the energy\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n u = u + LJverlet(r2[j,c],R[c],R[j])\r\n# else:\r\n# u = u + walls([X[j],Y[j]])#JV: TO CHANGE; NOW ONLY WORKS WITH VEL_VERLET_ON\r\n# else:\r\n# if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# u = u + LJverlet(r2[j,c],R[c],R[j],param)\r\n#\r\n# if((X[j]**2+Y[j]**2) > (0.8*L)**2):\r\n# u = u + walls([X[j],Y[j]],param)\r\n #JV: COMMENTED FOR NOW\r\n\r\n #JV: If the argument is True, we will append the energy to our corresponding array\r\n if(append == True):\r\n utot[j] = u\r\n\r\n f[j,:] = f[j,:]+np.array([dUx,dUy])\r\n\r\n if(append == True):\r\n U[int(i)] = np.sum(utot) #JV: Finally, we add the total energy so we have the global energy in a step of time\r\n\r\n 
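#f now holds the net Lennard-Jones force (shape (N,2)) on each particle, accumulated over its Nlist closest neighbours; when append is True, U[int(i)] stores the total potential energy of this time step\r\n    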
return f", "def setup_U_int(self, degree=None, eps=10**(-15)):\n if self.exterior_scaling:\n print(\"This method does not work for exterior complex scaling\")\n raise NotImplementedError\n max_order = self.max_order\n U = np.zeros((self.dim, self.dim), dtype=complex)\n # iterating through the FEM cells for 2-dimensional integrals\n for i in range(self.N_cell):\n print(i)\n smin = self.x_grid[i]\n smax = self.x_grid[i+1]\n for j in range(self.N_cell):\n etamin = self.x_grid[j]\n etamax = self.x_grid[j+1]\n #full propagator -------------------------------------\n if max(abs(self.K_full(etamin, smin)),\n abs(self.K_full(etamin, smax)),\n abs(self.K_full(etamax, smin)),\n abs(self.K_full(etamax, smax))) < eps:\n pass\n else:\n U[j*max_order:(j+1)*max_order+1,\n i*max_order:(i+1)*max_order+1] +=\\\n self.FEM.K_matrix(self.K_full, smin=smin, smax=smax,\n etamin=etamin, etamax=etamax,\n degree=degree)\n self.U = U", "def auxminrho1(x,m_ind):\n \n cc_sum = auxminrho2(x,m_ind) \n f = cc_sum + auxmin_cc_piece(x,0,m_ind) \n cfg.max_piece[m_ind] = 0 # max_piece should be ok here. We do not solve aux and real problem at the same time.\n \n for k_ind in range(1,cfg.nomax):\n \n f_tmp = cc_sum + auxmin_cc_piece(x,k_ind,m_ind) \n if f_tmp > f: \n f = f_tmp\n cfg.max_piece[m_ind] = k_ind\n \n return f", "def magnetic_reynolds(uu, param, grid, aa=list(), bb=list(), jj=list(),\n nghost=3, lmix=True):\n if len(bb) ==0 and len(aa) ==0 and len(jj) ==0:\n print('magnetic_reynolds WARNING: no aa, bb nor jj provided\\n'+\n 'aa or bb must be provided or aa for only hyper resistivity') \n #resistive force\n lres, lhyper3 = False, False\n for iresi in param.iresistivity:\n iresi = str.strip(iresi,'\\n')\n if 'hyper' not in iresi and len(iresi) > 0:\n lres = True\n if 'hyper3' in iresi:\n lhyper3 = True\n fresi = np.zeros_like(uu)\n if lres:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n if len(jj) == 0:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: calculating jj without aa\\n',\n 'provide aa or jj directly for accurate boundary values')\n jj = curl(bb,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n else:\n jj = curl2(aa,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n for j in range(0,3):\n jj[j, :nghost,:,:] = jj[j,-2*nghost:-nghost,:,:]\n jj[j,-nghost:,:,:] = jj[j, nghost: 2*nghost,:,:]\n jj[j,:, :nghost,:] = jj[j,:,-2*nghost:-nghost,:]\n jj[j,:,-nghost:,:] = jj[j,:, nghost: 2*nghost,:]\n jj[j,:,:, :nghost] = jj[j,:,:,-2*nghost:-nghost]\n jj[j,:,:,-nghost:] = jj[j,:,:, nghost: 2*nghost]\n fresi = fresi + param.eta*param.mu0*jj\n for iresi in param.iresistivity:\n iresi = str.strip(iresi,'\\n')\n if 'eta-const' not in iresi and 'hyper' not in iresi\\\n and len(iresi) > 0:\n print('magnetic_reynolds WARNING: '+iresi+' not implemented\\n'+\n 'terms may be missing from the standard resistive forces')\n if lhyper3:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: no aa provided\\n'+\n 'aa must be provided for hyper resistivity')\n return 1\n else:\n del6a = np.zeros_like(aa)\n for j in range(0,3):\n del6a[j] = del6(aa[j],grid.dx,grid.dy,grid.dz)\n del6a[j, :nghost,:,:] = del6a[j,-2*nghost:-nghost,:,:]\n del6a[j,-nghost:,:,:] = del6a[j, nghost: 2*nghost,:,:]\n del6a[j,:, :nghost,:] = del6a[j,:,-2*nghost:-nghost,:]\n del6a[j,:,-nghost:,:] = del6a[j,:, nghost: 2*nghost,:]\n del6a[j,:,:, :nghost] = del6a[j,:,:,-2*nghost:-nghost]\n del6a[j,:,:,-nghost:] = del6a[j,:,:, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6a[j] = 
del6(aa[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n #effective at l > 5 grid.dx? \n fresi = fresi + param.eta_hyper3*del6a\n del(del6a)\n fresi2 = np.sqrt(dot2(fresi))\n del(fresi)\n #advective force\n if len(bb) == 0:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: calculating uu x bb without bb\\n',\n 'provide aa or bb directly to proceed')\n return 1\n else:\n bb = curl(aa,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n for j in range(0,3):\n bb[j, :nghost,:,:] = bb[j,-2*nghost:-nghost,:,:]\n bb[j,-nghost:,:,:] = bb[j, nghost: 2*nghost,:,:]\n bb[j,:, :nghost,:] = bb[j,:,-2*nghost:-nghost,:]\n bb[j,:,-nghost:,:] = bb[j,:, nghost: 2*nghost,:]\n bb[j,:,:, :nghost] = bb[j,:,:,-2*nghost:-nghost]\n bb[j,:,:,-nghost:] = bb[j,:,:, nghost: 2*nghost]\n advec = cross(uu,bb)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fresi2.max() > 0:\n fresi2[np.where(fresi2==0)] = fresi2[np.where(fresi2>0)].min()\n Rm = advec2/fresi2\n #set minimum floor to exclude zero-valued Rm \n if Rm.max() > 0:\n Rm[np.where(Rm==0)] = Rm[np.where(Rm>0)].min()\n else:\n print('Rm undefined')\n else:\n Rm = advec2\n print('Rm undefined')\n return Rm", "def h2co_mm_radex(xarr,\n Temperature=25,\n logColumn=13,\n logDensity=4,\n xoff_v=0.0,\n width=1.0,\n grid_vwidth=1.0,\n gridbundle = None,\n debug=False,\n verbose=False,\n **kwargs):\n\n # Convert X-units to frequency in GHz\n xarr = xarr.as_unit('Hz', quiet=True)\n Tex303,Tex322,Tex321,tau303,tau322,tau321 = gridbundle\n\n\n # if this gets too far different from 1, we are gonna have a Bad Time.\n scalefac = grid_vwidth/width\n\n tex = (Tex303(logColumn,logDensity,Temperature),\n Tex322(logColumn,logDensity,Temperature),\n Tex321(logColumn,logDensity,Temperature))\n tau = (tau303(logColumn,logDensity,Temperature)*scalefac,\n tau322(logColumn,logDensity,Temperature)*scalefac,\n tau321(logColumn,logDensity,Temperature)*scalefac)\n if np.any(np.isnan(tex)) or np.any(np.isnan(tau)):\n raise ValueError(\"Invalid column/density\")\n\n if verbose:\n for ta,tk in zip(tau,tex):\n print(\"density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g\" % (logDensity, Temperature, logColumn, ta, tk))\n\n if debug:\n import pdb; pdb.set_trace()\n\n# here there be physics\n ckms = 2.99792458e5\n freq_dict = {\n '303': 218.222192e9,\n '322': 218.475632e9,\n '321': 218.760066e9,\n }\n Tbg = 2.73 #because it totally is\n\n\n nu0 = np.array([ 218.222192e9, 218.475632e9,218.760066e9])\n nuwidth = [width/ckms*nu for nu in nu0]\n nuoff = [xoff_v/ckms*nu for nu in nu0]\n minfreq = nu0/1e9 - 0.25\n maxfreq = nu0/1e9 + 0.25\n# spec2 = np.zeros(len(xarr))\n# for ii in range(len(nu0)):\n# taunu = tau[ii]*np.exp(-(xarr+nuoff[ii]-nu0[ii])**2/(2.0*nuwidth[ii]**2))\n# spec2 = spec2 + (1-np.exp(-taunu))*tex[ii] + Tbg*(np.exp(-taunu)-1) #second term assumes an ON-OFF\n\n spec = np.sum([\n (formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),\n xoff_v=xoff_v, width=width, **kwargs)\n * (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],\n axis=0)\n# import pdb\n# pdb.set_trace()\n\n\n return spec", "def ne_fwd_iter(self, q_dot, q_ddot, omega_im1, alpha_im1, acc_e_im1):\n # # Calculate CM angular velocity (WCS)\n # self.omega = omega_im1 + q_dot * self.z_gl\n # # Calculate CM angular acceleration (WCS)\n # self.alpha = alpha_im1 + self.z_gl * q_ddot +\\\n # X(self.omega, self.z_gl) * q_dot\n # # Calculate CM 
linear acc (WCS)\n # self.acc = acc_e_im1 + X(self.alpha, -self.r_hc) +\\\n # X(self.omega, X(self.omega, -self.r_hc))\n # # Calculate body end (flange2) linear acc (WCS)\n # self.acc_e = acc_e_im1 + X(self.alpha, -self.r_ht) +\\\n # X(self.omega, X(self.omega, -self.r_ht))\n self.omega, self.alpha, self.acc, self.acc_e = fast_fwd_ne(\n self.z_gl, self.r_hc, self.r_ht,\n q_dot, q_ddot, omega_im1, alpha_im1, acc_e_im1\n )", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n znew = z\n\n iter = 0\n eps = 1.0e99\n while iter < self.maxiter and abs(eps) > self.atol:\n z = znew\n znew = 4.0 - x*z\n\n eps = x*znew + znew - 4.0\n\n unknowns['z'] = znew\n unknowns['y'] = x + 2.0*znew\n\n resids['z'] = eps\n #print(unknowns['y'], unknowns['z'])", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def __init__(self,head_offset=0,aquifer_type='unconfined',domain_center=0+0j,\r\n domain_radius=1,H = None,variables=[],priors=[],observations=[]):\r\n \r\n import numpy as np\r\n \r\n # Set potential scaling variables\r\n self.head_offset = head_offset\r\n self.aquifer_type = aquifer_type\r\n self.H = H\r\n \r\n # Set domain scaling variables\r\n self.domain_center = domain_center\r\n self.domain_radius = domain_radius\r\n \r\n if not np.isscalar(self.domain_center):\r\n self.domain_center = self.domain_center[0] + 1j*self.domain_center[1]\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n # Define a list for Analytic Elements\r\n self.elementlist = []\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n self.observations = observations\r\n \r\n # This function scrapes the model and its elements for unknown variables,\r\n # then gives this instance three new variables:\r\n # self.num_params Number of unknown variables\r\n # self.params List of unknown variables\r\n # self.param_names List of names of unknown variables\r\n # self.priors List of prior dictionaries for unknow variables\r\n self.take_parameter_inventory()\r\n \r\n self.linear_solver = False\r\n \r\n # Pre-allocate the function matrix and parameter vector for the linear solver\r\n self.matrix_solver = []\r\n self.params_vector = []", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / 
(An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def acc_visc(j,rA,vA,mA,rhoA,PA,hA,dW=kernel.dW_M4):\n assert rA.shape[0] == vA.shape[0] == mA.shape[0] == rhoA.shape[0] == hA.shape[0], \"arrays are not matched\"\n N = len(mA)\n c_j = c_gas(j,rhoA,PA)\n\n tot = 0\n for i in range(N):\n if i != j:\n\n r_ij = rA[j,:] - rA[i,:]\n r_ij1 = np.linalg.norm(r_ij)\n v_ij = vA[j,:] - vA[i,:]\n m_i = mA[i]\n c_i = c_gas(i,rhoA,PA)\n c_ij = 0.5 * (c_i + c_j)\n h_ij = 0.5 * (hA[i] + hA[j])\n rho_ij = 0.5 * (rhoA[i] + rhoA[j])\n\n c = np.dot(v_ij,r_ij)\n mu_ij = ( c * h_ij ) / ( r_ij1**2 + 0.01*h_ij**2 )\n\n a = ( -alpha * mu_ij * c_ij + beta * mu_ij**2 ) / rho_ij\n b = 0\n Pi_ij = a*dm.heavi(-c) + b*dm.heavi(c)\n\n # if Pi_ij == 0:\n # print(\"i,j:\",i,j)\n # print(\"c:\",c)\n # print(\"c_ij\",c_ij)\n # print(\"\")\n # assert Pi_ij != 0\n\n tot += 
m_i * h_ij**(-4) * Pi_ij * dW(r_ij1,h_ij) * (r_ij/r_ij1)\r\n\r\n return - tot

def solveverlet(self,T,dt):\r\n t = 0.\r\n self.dt = dt\r\n self.n = int(T/dt)\r\n L = self.param[2]\r\n N = self.particles.size\r\n\r\n self.U = np.zeros([self.n])\r\n\r\n progress = t/T*100\r\n\r\n #JV: Here we define the number of the GxG grid that we will need to calculate the entropy, change in order to change the precision of this grid\r\n self.G = 7\r\n\r\n #JV: We create a list that will be useful for the walls submenu, that will help us in the border conditions of the wall, see in vel_verlet()\r\n self.bouncing = np.zeros(self.particles.size)\r\n\r\n if(self.param[4] == \"Subsystems\"): #JV: If we are on \"Subsystems\", we will count the types of particles differently\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2]) #JV: When we are not in \"Subsystems\", we will have the same type of variable, but will only use the [:,:,0] (this is because numba has problems otherwise)\r\n\r\n self.entropy_val = 0\r\n\r\n #JV: If we are running the Brownian simulation, we initialize the array that will keep track of whether the Brownian particle goes through a wall\r\n if(self.param[4] == \"Brownian\"):\r\n self.wallcount = np.zeros([2])\r\n else:\r\n self.wallcount = np.zeros([2]) #JV: We have to keep both in the same type of variables, otherwise numba will have problems. So now this conditional block is quite pointless. TO-ERASE\r\n\r\n np.vectorize(lambda i: i.reset())(self.particles) #This line resets the particles to their initial position\r\n\r\n self.vel_verlet_on = True #JV: If it's true, it will compute with the velocity verlet algorithm; if it's not, it will compute with normal verlet\r\n\r\n self.Nlist = int(1*(self.particles.size)**(1/2)) #JV: This variable defines the number of close particles that will be stored in the list (go to close_particles_list() for more info)\r\n #print(self.Nlist)\r\n\r\n #X,Y,VX,VY have the trajectories of the particles with two indexes that\r\n #access time and particles, respectively\r\n self.X = np.vectorize(lambda i: i.r[0])(self.particles)\r\n self.Y = np.vectorize(lambda i: i.r[1])(self.particles)\r\n self.VX = np.vectorize(lambda i: i.v[0])(self.particles)\r\n self.VY = np.vectorize(lambda i: i.v[1])(self.particles)\r\n\r\n MX, MXT = np.meshgrid(self.X[:],self.X[:])\r\n MY, MYT = np.meshgrid(self.Y[:],self.Y[:])\r\n\r\n #JV: So dx is an nxn antisymmetric array with 0 in the diagonal, and each position is the corresponding separation between the particles,\r\n # so the position [1,2] is the separation between particle 1 and 2 (x1-x2), and so on\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: we first calculate the matrix that contains in every row the indices of the m closest particles\r\n\r\n if(self.vel_verlet_on == True):\r\n #JV: We define the variables that we will need in the velocity verlet algorithm\r\n print(\"Computing with the Velocity-Verlet algorithm\")\r\n X0 = self.X\r\n Y0 = self.Y\r\n VX0 = self.VX\r\n VY0 = self.VY\r\n\r\n X1 = self.X\r\n Y1 = self.Y\r\n VX1 = self.VX\r\n VY1 = self.VY\r\n\r\n MX, MXT = np.meshgrid(X0[:],X0[:],copy=False)\r\n MY, MYT = np.meshgrid(Y0[:],Y0[:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we 
update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n a0 = (1/self.m)*np.transpose(fv(X0[:],Y0[:],dx,dy,r2,t/self.dt,False,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2))\r\n\r\n for i in range(0, self.n):\r\n r1 = np.array([X0,Y0]) + np.array([VX0,VY0])*dt + 0.5*a0*dt**2\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(self.param[3] == \"Free!\"):\r\n #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n dx_v2 = (np.abs(dx.copy())-1*L)\r\n r2_v2 = dx_v2**2+dy**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n dy_v2 = (np.abs(dy.copy())-1*L)\r\n r2_v2 = dx**2+dy_v2**2\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n r2_v2 = dx_v2**2+dy_v2**2\r\n dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n #JV: call velocityverlet to compute the next position\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n X1,Y1,VX1,VY1,a1 = vel_verlet(t,dt,np.array([X0,Y0]),np.array([VX0,VY0]),a0,dx,dy,r2,self.close_list,self.m,self.R,L,N,self.param[3],self.param[4],self.param[7],self.param[8],self.param[9],self.U,self.Nlist,self.vel_verlet_on,self.param[5],self.grid,self.G,self.wallcount,self.X2,self.bouncing)\r\n\r\n #JV: Now we check where this particle is in a RxR grid, that will help us to calcule the entropy. 
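Each particle increments the counter of the GxG grid cell that contains it, and those occupation counts later become the probabilities p_ji in the Shannon-entropy estimate. 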
We do not do this for the Brownian mode because we don't compute the entropy in that case.\r\n if(self.param[4] != \"Brownian\"):\r\n for h in range(0, N):\r\n if(self.param[4] == \"Subsystems\"):\r\n if(h < self.param[5]**2): #JV: self.param[5] stores the number of n1xn1 type 1 particles\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),0] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G)),1] += 1\r\n else:\r\n self.grid[int((X1[h]+0.495*L) / (L/self.G)), int((Y1[h]+0.495*L) / (L/self.G))] += 1\r\n\r\n if(self.param[4] == \"Brownian\"):\r\n if(self.wallcount[0] == 0):\r\n self.X2 = np.append(self.X2,(abs(X1[N-1]))**2)\r\n else:\r\n self.X2 = np.append(self.X2,(L*self.wallcount[0]+(X1[N-1]))**2)\r\n self.entropy = np.append(self.entropy,self.entropy_val)\r\n\r\n t += dt\r\n\r\n self.X = np.vstack((self.X,X1))\r\n self.Y = np.vstack((self.Y,Y1))\r\n self.VX = np.vstack((self.VX, VX1))\r\n self.VY = np.vstack((self.VY, VY1))\r\n a0 = a1\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n VX0,VY0 = VX1,VY1\r\n\r\n #JV: Every amount of steps of time we calculate the entropy\r\n update_entropy = 2\r\n if(i % update_entropy == 0):\r\n\r\n self.entropy_val = 0\r\n sumagrid = np.sum(self.grid)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n sumagrid_subs = np.zeros([2])\r\n sumagrid_subs[0] = np.sum(self.grid[:,:,0]) #JV: Number of type-0 particles\r\n sumagrid_subs[1] = sumagrid - sumagrid_subs[0] #JV: Number of type-1 particles\r\n\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n for l in range(2):\r\n if ((self.grid[j,k,0]+self.grid[j,k,1]) != 0):\r\n # pji = float(self.grid[j,k,l])/(update_entropy*(self.grid[j,k,0]+self.grid[j,k,1]))\r\n pji = float((self.grid[j,k,l]/(sumagrid_subs[l]/(sumagrid_subs[0]+sumagrid_subs[1])))/(update_entropy*(self.grid[j,k,0]/(sumagrid_subs[0]/(sumagrid_subs[0]+sumagrid_subs[1])))+(self.grid[j,k,1]/(sumagrid_subs[1]/(sumagrid_subs[0]+sumagrid_subs[1])))))\r\n else:\r\n pji = 0\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji) #JV: We will only calculate the value when pji != 0\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n else:\r\n for j in range(self.G):\r\n for k in range(self.G):\r\n pji = float(self.grid[j,k,0])/(update_entropy*sumagrid)\r\n if(pji != 0):\r\n self.entropy_val += -pji*np.log(pji)\r\n\r\n self.entropy_val = self.entropy_val /(self.G**2)\r\n\r\n if(self.param[4] == \"Subsystems\"):\r\n self.grid = np.zeros([self.G,self.G,2])\r\n else:\r\n self.grid = np.zeros([self.G,self.G,2])\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n else:\r\n print(\"Computing with the Verlet algorithm\")\r\n\r\n #Generation of the precious position (backwards euler step)\r\n X1 = self.X\r\n Y1 = self.Y\r\n X0 = X1 - self.VX*dt\r\n Y0 = Y1 - self.VY*dt\r\n\r\n for self.i in range(0,self.n):\r\n #Call verlet to compute the next position\r\n X2,Y2 = self.verlet(t,dt,np.array([X0,Y0]),np.array([X1,Y1]))\r\n t = t + dt\r\n\r\n #Add the new positions to X,Y,VX,VY\r\n self.X = np.vstack((self.X,X2))\r\n self.Y = np.vstack((self.Y,Y2))\r\n self.VX = np.vstack((self.VX,(X2-X0)/(2*dt)))\r\n self.VY = np.vstack((self.VY,(Y2-Y0)/(2*dt)))\r\n\r\n #Redefine and repeat\r\n X0,Y0 = X1,Y1\r\n X1,Y1 = X2,Y2\r\n\r\n #Update and show progress through console\r\n progress = t/T*100\r\n if(self.i%1000 == 0):\r\n print(int(progress),'% done')\r\n\r\n #Once the computation has ended, I 
compute the kinetic energy,\r\n #the magnitude of the velocity V and the temperature\r\n #(see doc for temperature definition)\r\n self.KE()\r\n self.V = np.sqrt((self.VX**2 + self.VY**2))\r\n self.T = (np.sum(self.V**2,axis=1)/(self.particles.size*2 - 2))\r\n\r\n #Generation of the MB functions, you can modify the definition by\r\n #changing the linspace points\r\n vs,a = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n a,ts = np.meshgrid(np.linspace(0,self.V.max(),100),self.T)\r\n self.MB = (vs/(ts)*np.exp(-vs**2/(2*ts)))\r\n\r\n #JV: If we are on the Subsystems submenu, we will calculate the temperature and the MB distribution of both types of particles\r\n if(self.param[4] == \"Subsystems\"):\r\n\r\n #JV: 1st group of particles\r\n self.V1 = np.sqrt((self.VX[:,0:(self.param[5]**2)]**2 + self.VY[:,0:(self.param[5]**2)]**2))\r\n self.T1 = (np.sum(self.V1**2,axis=1)/((self.param[5]**2)*2 - 2))\r\n\r\n vs1,a1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n a1,ts1 = np.meshgrid(np.linspace(0,self.V1.max(),100),self.T1)\r\n self.MB1 = (vs1/(ts1)*np.exp(-vs1**2/(2*ts1)))\r\n\r\n #JV: 2nd group\r\n self.V2 = np.sqrt((self.VX[:,(self.param[5]**2):self.particles.size]**2 + self.VY[:,(self.param[5]**2):self.particles.size]**2))\r\n self.T2 = (np.sum(self.V2**2,axis=1)/((self.particles.size-self.param[5]**2)*2 - 2))\r\n\r\n vs2,a2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n a2,ts2 = np.meshgrid(np.linspace(0,self.V2.max(),100),self.T2)\r\n self.MB2 = (vs2/(ts2)*np.exp(-vs2**2/(2*ts2)))\r\n\r\n \"\"\"Here I generate the accumulated V,T and MB using lists, the reason I use lists is because if you append two numpy arrays\r\n to an empty numpy array, they merge instead of remaining separate. You could technically use splicing to save on memory\r\n but sacrificing cpu.\"\"\"\r\n\r\n self.Vacu = []\r\n self.Tacu = []\r\n self.MBacu = []\r\n self.Vacu.append(self.V[int(self.n/2),:])\r\n self.Tacu.append(np.sum(self.V[int(self.n/2),:]**2)/(self.particles.size*2 - 2))\r\n\r\n vs = np.linspace(0,self.V.max(),100)\r\n self.MBacu.append((vs/(self.Tacu[0])*np.exp(-vs**2/(2*self.Tacu[0]))))\r\n\r\n #This delta controls the time interval for accumulation, right now its every 5 units\r\n delta = 5./dt\r\n\r\n #This 40 that appers in these lines is the time from which I start accumulating\r\n #to ensure the system has reached equilibrium.\r\n for i in range(1,int((self.n-(40./dt))/delta)):\r\n self.Vacu.append(np.hstack((self.Vacu[i-1],self.V[int(40./dt)+int(i*delta),:])))\r\n self.Tacu.append(np.sum(self.Vacu[i]**2)/(self.Vacu[i].size*2 - 2))\r\n self.MBacu.append((vs/(self.Tacu[i])*np.exp(-vs**2/(2*self.Tacu[i]))))\r\n return", "def calculateSubgridErrorInterpolants(self,ci):\n #now project to finite element space\n if self.usesGradientStabilization:\n #mwf hack!\n #self.strongResidualInterpolant[ci].projectFromInterpolationConditions(self.cip[('pdeResidual',ci)])\n #self.strongResidualInterpolant[ci].projectFromInterpolationConditions(self.cip[('mt',ci)])\n #self.strongResidualInterpolant[ci].projectFromInterpolationConditions(self.cip[('m',ci)]/self.timeIntegration.dt)\n #self.strongResidualInterpolant[ci].projectFromInterpolationConditions(self.cip[('r',ci)])\n self.subgridTmp_ip[ci].fill(0.0)\n if ('mt',ci) in self.cip:\n self.subgridTmp_ip[ci] += self.cip[('mt',ci)]\n if ('r',ci) in self.cip:\n self.subgridTmp_ip[ci] += self.cip[('r',ci)]\n self.strongResidualInterpolant[ci].projectFromInterpolationConditions(self.subgridTmp_ip[ci])", "def efSolver2(self):\n dx = 
self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def solve_stationary_equilibrium(self) :\n \n \n \n \n #a. find the equilibrium wage given the tax rate and subsidy\n w_ss = self.find_equilibrium_wage(self.w0_guess, self.w1_guess)\n \n #b. obtain firm policy functions and discount present value factors\n W_e , pol_k, pol_n, pi, W, pol_enter = self.entry_condition(w_ss)\n \n \n #c. obtain the invariant distribution \n \n #i. 
normalized invariant distribution over firms\n mu_hat = pol_enter/self.lambdaa * self.joint_pdf\n \n #ii. labor market clearing (section 3.5), agg demand for labor\n N_ss = np.sum(np.sum(pol_n*mu_hat, axis=0))\n \n #iii. ss equilibrium level of entry (mass of entrants)\n E_star = 1/N_ss \n \n #iv. rescale invariant distribution over firms, mu(s,tau)\n mu = E_star*mu_hat\n \n #d. marginal distributions\n \n #i. sum over subsidies, except, taxes of stationary distribution\n distrib_stationary = np.sum(mu, axis=1)\n total_mass = np.sum(distrib_stationary)\n \n #ii. marginal stationary distribution over productivity\n pdf_stationary = distrib_stationary / total_mass\n cdf_stationary = np.cumsum(pdf_stationary)\n \n #iii. stationary distribution over number of employed \n distrib_emp = (pol_n[:,2] * pdf_stationary)/ np.sum(pol_n[:,2] * pdf_stationary)\n pdf_emp = distrib_emp / np.sum(distrib_emp)\n cdf_emp = np.cumsum(pdf_emp)\n \n #e. Aggregate statistics\n \n Y_ss = np.sum(np.sum( self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma*mu, axis=0)) #ss output\n K_ss = np.sum(np.sum(pol_k*mu, axis=0)) #ss capital\n TFP_ss = Y_ss/(N_ss*E_star)/(K_ss/(N_ss*E_star))**self.alpha\n total_employment = np.dot(self.labor_demand_rel, distrib_stationary)\n average_firm_size = total_employment / total_mass\n \n #output share of subsidy, excemption, taxed\n Y_set = np.sum(self.grid_s_matrix * pol_k**self.alpha*pol_n**self.gamma*mu, axis=0) / Y_ss\n \n Y_sub_percent = Y_set[0] #output share of establishments that are receiving a subsidy, Y_s/Y\n Y_exempt_percent = Y_set[1]\n Y_taxed__Percent = Y_set[2]\n \n #the total subsidies paid out to establishments receiving subsidies as a fraction of output. numerator takes first column which is subsidy (S/Y)\n subsidy_size = np.sum(-self.tau_output[:,0]*self.grid_s_matrix[:,0]*pol_k[:,0]**self.alpha \\\n *pol_n[:,0]**self.gamma*mu[:,0]-self.tau_capital[:,0]*self.ret \\\n *pol_k[:,0]*mu[:,0]-self.tau_labor[:,0]*w_ss* \\\n pol_n[:,0]*mu[:,0]) / Y_ss\n \n \n return Y_ss, K_ss, TFP_ss, average_firm_size, E_star, Y_set, subsidy_size, N_ss, w_ss, cdf_stationary, cdf_emp", "def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column 
AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):", "def Solve(self,iter_val=0):\n\n ### Save Files before solve ###\n self.fprint(\"Saving Input Data\",special=\"header\")\n if \"mesh\" in self.params.output:\n self.problem.dom.Save(val=iter_val)\n if \"initial_guess\" in self.params.output:\n self.problem.bd.SaveInitialGuess(val=iter_val)\n if \"height\" in self.params.output and self.problem.dom.dim == 3:\n self.problem.bd.SaveHeight(val=iter_val)\n if \"turbine_force\" in self.params.output:\n self.problem.farm.SaveRotorDisks(val=iter_val)\n self.fprint(\"Finished\",special=\"footer\")\n\n ####################################################################\n ### This is the better way to define a nonlinear problem but it \n ### doesn't play nice with dolfin_adjoint\n # ### Define Jacobian ###\n # dU = TrialFunction(self.problem.fs.W)\n # J = derivative(self.problem.F, self.problem.up_next, dU)\n\n # ### Setup nonlinear solver ###\n # nonlinear_problem = NonlinearVariationalProblem(self.problem.F, self.problem.up_next, self.problem.bd.bcs, J)\n # nonlinear_solver = NonlinearVariationalSolver(nonlinear_problem)\n\n # ### Set some parameters ###\n # solver_parameters = nonlinear_solver.parameters\n # solver_parameters[\"nonlinear_solver\"] = \"snes\"\n # solver_parameters[\"snes_solver\"][\"linear_solver\"] = \"mumps\"\n # solver_parameters[\"snes_solver\"][\"maximum_iterations\"] = 50\n # solver_parameters[\"snes_solver\"][\"error_on_nonconvergence\"] = False\n # solver_parameters[\"snes_solver\"][\"line_search\"] = \"bt\" # Available: basic, bt, cp, l2, nleqerr\n\n ### Solve the problem ###\n # self.fprint(\"Solving\",special=\"header\")\n # start = time.time()\n # iters, converged = nonlinear_solver.solve()\n # stop = time.time()\n # self.fprint(\"Total Nonlinear Iterations: {:d}\".format(iters))\n # self.fprint(\"Converged Successfully: {0}\".format(converged))\n ####################################################################\n\n\n nonlinear_solver = self.params[\"solver\"].get(\"nonlinear_solver\", \"snes\")\n relaxation = self.params[\"solver\"].get(\"newton_relaxation\", 1.0)\n\n self.fprint(\"Solving with {0}\".format(nonlinear_solver))\n if nonlinear_solver == \"newton\":\n self.fprint(\"Relaxation parameter = {: 1.2f}\".format(relaxation))\n\n newton_options = {\"relaxation_parameter\": relaxation,\n \"maximum_iterations\": 40,\n \"linear_solver\": \"mumps\",\n \"absolute_tolerance\": 1e-6,\n \"relative_tolerance\": 1e-5}\n \n solver_parameters = {\"nonlinear_solver\": \"newton\",\n \"newton_solver\": newton_options}\n\n elif nonlinear_solver == \"snes\":\n # ### Add some helper functions to solver options ###\n solver_parameters = {\"nonlinear_solver\": \"snes\",\n \"snes_solver\": {\n \"linear_solver\": \"mumps\", \n \"maximum_iterations\": 40,\n \"error_on_nonconvergence\": True,\n \"line_search\": \"bt\",\n }}\n \n else:\n raise ValueError(\"Unknown nonlinear solver type: {0}\".format(nonlinear_solver))\n\n ### Start the Solve Process ###\n self.fprint(\"Solving\",special=\"header\")\n start = time.time()\n \n # ### Solve the Baseline Problem ###\n # solve(self.problem.F_sans_tf == 0, self.problem.up_next, self.problem.bd.bcs, 
solver_parameters=solver_parameters, **self.extra_kwarg)\n\n # ### Store the Baseline and Assign for the real solve ###\n # self.up_baseline = self.problem.up_next.copy(deepcopy=True)\n # self.problem.up_next.assign(self.up_baseline)\n\n ### Solve the real problem ###\n solve(self.problem.F == 0, self.problem.up_next, self.problem.bd.bcs, solver_parameters=solver_parameters)\n stop = time.time()\n self.fprint(\"Solve Complete: {:1.2f} s\".format(stop-start),special=\"footer\")\n # self.u_next,self.p_next = self.problem.up_next.split(True)\n self.u_next,self.p_next = split(self.problem.up_next)\n # self.nu_T = project(self.problem.nu_T,self.problem.fs.Q,solver_type='mumps',**self.extra_kwarg)\n self.nu_T = None\n\n\n ### Save solutions ###\n if \"solution\" in self.params.output:\n self.fprint(\"Saving Solution\",special=\"header\")\n self.Save(val=iter_val)\n self.fprint(\"Finished\",special=\"footer\")\n\n ### calculate the power for each turbine ###\n ###################################\n ### Fix how angle is transfered ###\n ###################################\n if self.optimizing or self.save_power:\n self.J += -self.CalculatePowerFunctional((iter_val-self.problem.dom.init_wind)) \n\n # self.fprint(\"Speed Percent of Inflow Speed\")\n # ps = []\n # for i in range(6):\n # HH = self.problem.farm.HH[0]\n # RD = self.problem.farm.RD[0]\n # x_val = (i+1)*RD\n # vel = self.problem.up_next([x_val,0,HH])\n # vel = vel[0:3]\n # nom = np.linalg.norm(vel)\n # perc = nom/self.problem.bd.HH_vel\n # ps.append(perc)\n # self.fprint(\"Speed Percent at (\"+repr(int(x_val))+\", 0, \"+repr(HH)+\"): \"+repr(perc))\n # print(ps)", "def solve_euler(Npts, IC, tout, Ca = 0.7, lagrangian=False, HLLC=True):\n # Setup up the grid\n stencil = 2\n \n xe = np.linspace(0.0, 1.0, Npts+1)\n xc = 0.5*(xe[1:] + xe[:-1])\n\n def boundary(xc, Q):\n # Add periodic boundaries to Q\n Qb = np.empty([Npts+2*stencil, NHYDRO])\n Qb[stencil:-stencil] = Q\n Qb[ :stencil] = Qb[Npts:Npts+stencil]\n Qb[-stencil:] = Qb[stencil:2*stencil]\n\n # Add periodic boundaries for cell centres and compute interfaces\n xc_b = np.empty(Npts+2*(stencil+1))\n xc_b[(stencil+1):-(stencil+1)] = xc\n xc_b[ :(stencil+1)] = xc[-(stencil+1):] - 1\n xc_b[-(stencil+1):] = xc[ :(stencil+1)] + 1\n\n xe = 0.5*(xc_b[1:] + xc_b[:-1])\n xc_b = xc_b[1:-1]\n\n return xc_b, xe, Qb\n\n def RK2_prim(xc_in, Q, dt):\n #1. Apply Boundaries\n xc, xe, Qb = boundary(xc_in, Q)\n dx = np.diff(xe).reshape(-1, 1)\n\n #2. Compute Primitive variables\n Ub = Qb / dx\n Wb = cons2prim(Ub)\n\n #3. Compute gradients\n grad = compute_gradients(xc, xe, Wb)\n\n #4. Set interface velocities:\n if lagrangian:\n vc = Wb[:,1].copy()\n else:\n vc = np.zeros_like(Wb[:,1])\n f = (xe[1:-1] - xc[:-1]) / (xc[1:]-xc[:-1])\n vf = f*vc[1:] + (1-f)*vc[:-1]\n\n #5. Compute edge states:\n Wp = Wb[1:-1] + grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Wm = Wb[1:-1] + grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n\n #6. Compute first fluxes:\n if HLLC:\n flux_0 = HLLC_solver(Wp[:-1], Wm[1:], vf[1:-1])\n else:\n flux_0 = HLL_solver(Wp[:-1], Wm[1:], vf[1:-1])\n\n #7. Move the mesh and compute new face locations:\n xc = xc_in + vc[stencil:-stencil]*dt\n xc, xe, _ = boundary(xc, Q)\n dx = np.diff(xe).reshape(-1, 1)\n\n #8. Predict edge states at t+dt\n # 8a. First predict the mid-points at t+dt\n dWdt = compute_time_diff_W(Wb[1:-1], grad, vc[1:-1]) \n Ws0 = Wb[1:-1] + dt*dWdt\n Ws = Wb[1:-1] + dt*dWdt\n\n # 8b. 
Apply the drag forces using Exponential Euler method\n rho = Ws[:,0] + FB*Ws[:,3]\n \n v_com = (Ws[:,0]*Ws[:,1] + FB*Ws[:,3]*Ws[:,4])/rho\n dV = (Wb[1:-1,4] - Wb[1:-1,1]) * np.exp(-K*rho*dt) \n da = (dWdt[:,4] - dWdt[:,1]) *-np.expm1(-dt*K*rho)/(K*rho)\n\n Ws[:,1] = v_com - FB*Ws[:,3]*(dV + da)/rho\n Ws[:,4] = v_com + Ws[:,0]*(dV + da)/rho\n \n # Heating due to drag\n dEk = 0.5*(Ws[:,0]*Ws[:,1]**2 - Ws0[:,0]*Ws0[:,1]**2 +\n Ws[:,3]*Ws[:,4]**2 - Ws0[:,3]*Ws0[:,4]**2)\n Ws[:,2] -= dEk * (GAMMA-1)\n \n # 8c. Reconstruct the edge states\n Wp = Ws + grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Wm = Ws + grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n\n #9. Compute second fluxes\n if HLLC:\n flux_1 = HLLC_solver(Wp[:-1], Wm[1:], vf[1:-1])\n else:\n flux_1 = HLL_solver(Wp[:-1], Wm[1:], vf[1:-1])\n\n #10. Compute the drag terms using 2nd order exponential Runge-Kutta method.\n f_g0 = -np.diff(flux_0[:,1]) ; f_g1 = -np.diff(flux_1[:,1])\n f_d0 = -np.diff(flux_0[:,4]) ; f_d1 = -np.diff(flux_1[:,4])\n\n Qn = Q - 0.5*dt*np.diff(flux_0 + flux_1, axis=0) \n\n m_com = Qn[:,1] + FB*Qn[:,4]\n \n rho = Qn[:,0] + FB*Qn[:,3]\n eps_g = Qn[:,0] / rho ; eps_d = Qn[:,3] / rho\n rho /= np.diff(xe[stencil:-stencil])\n\n df = (eps_g*(f_d0+f_d1) - eps_d*(f_g0+f_g1)) / 2\n\n dm = (eps_g*Q[:,4] - eps_d*Q[:,1]) * np.exp(-K*rho*dt) \n dm += df *-np.expm1(-dt*K*rho)/(K*rho)\n \n m_d = eps_d * m_com + dm\n m_g = eps_g * m_com - dm*FB\n\n #11. Update Conserved quantities\n Q[:] = Qn\n\n Q[:,1] = m_g\n Q[:,4] = m_d\n\n # Heating due to drag to conserve energy\n if FB:\n Q[:,2] -= 0.5*(Q[:,4]**2 - Qn[:,4]**2) / Q[:,3]\n \n\n # Return\n xc = xc[stencil:-stencil]\n xe = xe[stencil:-stencil]\n\n return xc, xe, Q\n\n # Set the initial conditions\n dx = np.diff(xe).reshape(-1,1)\n W = IC(xe)\n U = prim2cons(W)\n Q = U * dx\n\n t = 0\n while t < tout:\n\n U = Q/dx\n\n vf = 0\n if lagrangian:\n vf = U[:,1] / U[:,0]\n dtmax = Ca * np.min(dx / max_wave_speed(U, vf))\n dt = min(dtmax, tout-t)\n\n xc, xe, Q = RK2_prim(xc, Q, dt)\n dx = np.diff(xe).reshape(-1,1)\n \n t = min(tout, t+dt)\n \n\n return xc, xe, cons2prim(Q/dx)", "def indices_and_currents_TSC_2D( charge_electron, positions_x, positions_y, velocity_x, velocity_y,\\\n x_grid, y_grid, ghost_cells, length_domain_x, length_domain_y, dt ):\n \n \n positions_x_new = positions_x + velocity_x * dt\n positions_y_new = positions_y + velocity_y * dt\n\n base_indices_x = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n base_indices_y = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n\n dx = af.sum(x_grid[1] - x_grid[0])\n dy = af.sum(y_grid[1] - y_grid[0])\n\n\n # Computing S0_x and S0_y\n ###########################################################################################\n \n # Determining the grid cells containing the respective particles\n \n x_zone = (((af.abs(positions_x - af.sum(x_grid[0])))/dx).as_type(af.Dtype.u32))\n y_zone = (((af.abs(positions_y - af.sum(y_grid[0])))/dy).as_type(af.Dtype.u32))\n\n \n # Determing the indices of the closest grid node in x direction\n\n temp = af.where(af.abs(positions_x-x_grid[x_zone]) < \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = x_zone[temp]\n\n temp = af.where(af.abs(positions_x - x_grid[x_zone]) >= \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = (x_zone[temp] + 1).as_type(af.Dtype.u32) \n\n\n # Determing the indices of the closest grid node in y direction\n\n temp = 
af.where(af.abs(positions_y-y_grid[y_zone]) < \\\n af.abs(positions_y-y_grid[y_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_y[temp] = y_zone[temp]\n\n temp = af.where(af.abs(positions_y - y_grid[y_zone])>=af.abs(positions_y-x_grid[y_zone + 1]))\n\n if(temp.elements()>0):\n base_indices_y[temp] = (y_zone[temp] + 1).as_type(af.Dtype.u32) \n\n # Concatenating the index list for near by grid nodes in x direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n base_indices_minus_two = (base_indices_x - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_x - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_x + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_x + 2).as_type(af.Dtype.u32) \n\n\n\n index_list_x = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_x),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n\n\n # Concatenating the index list for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n \n base_indices_minus_two = (base_indices_y - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_y - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_y + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_y + 2).as_type(af.Dtype.u32) \n\n\n index_list_y = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_y),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n # Concatenating the positions_x for determining weights for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n positions_x_5x = af.join( 0,\\\n af.join(0, positions_x, positions_x, positions_x),\\\n af.join(0, positions_x, positions_x),\\\n )\n\n positions_y_5x = af.join( 0,\\\n af.join(0, positions_y, positions_y, positions_y),\\\n af.join(0, positions_y, positions_y),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n\n distance_nodes_x = x_grid[af.flat(index_list_x)]\n\n distance_nodes_y = y_grid[af.flat(index_list_y)]\n\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx))**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy)**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy))**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle\n \n S0_x = af.tile(W_x, 1, 1, 5)\n S0_y = af.tile(W_y, 1, 1, 5)\n\n\n S0_y = af.reorder(S0_y, 0, 2, 1)\n\n\n\n 
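\n    # (Editorial note -- hedged sketch, not part of the original source.) The\n    # weights built above are the 1D triangular-shaped-cloud (TSC) quadratic-spline\n    # kernel, evaluated per grid node at distance d from the particle:\n    #     W(d) = 0.75 - (d/dx)**2          for |d| <  0.5*dx\n    #     W(d) = 0.5*(1.5 - |d|/dx)**2     for 0.5*dx <= |d| < 1.5*dx\n    #     W(d) = 0                         otherwise\n    # so each particle overlaps at most 3 nodes per axis; 5 node slots are carried\n    # here because the Esirkepov update below needs the union of the old (S0) and\n    # new (S1) supports. A scalar reference version, assuming plain Python:\n    #     def tsc_weight(d, dx):\n    #         q = abs(d) / dx\n    #         if q < 0.5: return 0.75 - q**2\n    #         if q < 1.5: return 0.5 * (1.5 - q)**2\n    #         return 0.0\n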
# Computing S1_x and S1_y\n    ###########################################################################################\n\n    positions_x_5x_new = af.join(0, af.join(0, positions_x_new, positions_x_new, positions_x_new), af.join(0, positions_x_new, positions_x_new))\n\n    positions_y_5x_new = af.join(0, af.join(0, positions_y_new, positions_y_new, positions_y_new), af.join(0, positions_y_new, positions_y_new))\n\n    # Determining S1 for positions at t = (n+1) * dt\n\n    W_x = 0 * distance_nodes_x.copy()\n    W_y = 0 * distance_nodes_y.copy()\n\n    # Determining weights in x direction\n\n    temp = af.where(af.abs(distance_nodes_x - positions_x_5x_new) < (0.5*dx))\n\n    if(temp.elements()>0):\n        W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx)**2\n\n    temp = af.where((af.abs(distance_nodes_x - positions_x_5x_new) >= (0.5*dx)) * (af.abs(distance_nodes_x - positions_x_5x_new) < (1.5 * dx)))\n\n    if(temp.elements()>0):\n        W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx))**2\n\n    # Determining weights in y direction\n\n    temp = af.where(af.abs(distance_nodes_y - positions_y_5x_new) < (0.5*dy))\n\n    if(temp.elements()>0):\n        W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x_new[temp])/dy)**2\n\n    temp = af.where((af.abs(distance_nodes_y - positions_y_5x_new) >= (0.5*dy)) * (af.abs(distance_nodes_y - positions_y_5x_new) < (1.5 * dy)))\n\n    if(temp.elements()>0):\n        W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x_new[temp])/dy))**2\n\n    # Restructuring W_x and W_y for visualization and ease of understanding\n\n    W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n    W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n    # Tiling S1_x and S1_y for the 25 indices around the particle\n\n    S1_x = af.tile(W_x, 1, 1, 5)\n    S1_y = af.tile(W_y, 1, 1, 5)\n\n    S1_y = af.reorder(S1_y, 0, 2, 1)\n\n    ###########################################################################################\n\n    # Determining the final weight matrix for currents in 3D matrix form factor\n\n    W_x = (S1_x - S0_x) * (S0_y + (0.5 *(S1_y - S0_y)))\n\n    W_y = (S1_y - S0_y) * (S0_x + (0.5 *(S1_x - S0_x)))\n\n    ###########################################################################################\n\n    # Assigning Jx and Jy according to Esirkepov's scheme\n\n    Jx = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n    Jy = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n\n    Jx[:, 0, :] = -1 * charge_electron * (dx/dt) * W_x[:, 0, :].copy()\n    Jx[:, 1, :] = Jx[:, 0, :] + -1 * charge_electron * (dx/dt) * W_x[:, 1, :].copy()\n    Jx[:, 2, :] = Jx[:, 1, :] + -1 * charge_electron * (dx/dt) * W_x[:, 2, :].copy()\n    Jx[:, 3, :] = Jx[:, 2, :] + -1 * charge_electron * (dx/dt) * W_x[:, 3, :].copy()\n    Jx[:, 4, :] = Jx[:, 3, :] + -1 * charge_electron * (dx/dt) * W_x[:, 4, :].copy()\n\n    # Computing current density using currents\n\n    Jx = (1/(dx * dy)) * Jx\n\n    Jy[:, :, 0] = -1 * charge_electron * (dy/dt) * W_y[:, :, 0].copy()\n    Jy[:, :, 1] = Jy[:, :, 0] + -1 * charge_electron * (dy/dt) * W_y[:, :, 1].copy()\n    Jy[:, :, 2] = Jy[:, :, 1] + -1 * charge_electron * (dy/dt) * W_y[:, :, 2].copy()\n    Jy[:, :, 3] = Jy[:, :, 2] + -1 * charge_electron * (dy/dt) * W_y[:, :, 3].copy()\n    Jy[:, :, 4] = Jy[:, :, 3] + -1 * charge_electron * (dy/dt) * W_y[:, :, 4].copy()\n\n    # Computing current density using currents\n\n    
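\n    # (Editorial note.) As the comment above indicates, the staircase sums follow\n    # Esirkepov's charge-conserving prescription: along each axis the current at\n    # node k is the running sum -q*(dx/dt)*sum_{i<=k}(S1 - S0)_i, which satisfies\n    # the discrete continuity equation d(rho)/dt + div(J) = 0 by construction.\n    # Dividing by the cell area dx*dy (below for Jy, above for Jx) converts the\n    # deposited currents into current densities.\n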
Jy = (1/(dx * dy)) * Jy\n\n # Preparing the final index and current vectors\n ###########################################################################################\n \n \n # Determining the x indices for charge deposition\n index_list_x_Jx = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jx = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n\n\n currents_Jx = af.flat(Jx)\n\n # Determining the x indices for charge deposition\n index_list_x_Jy = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jy = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n \n # Flattenning the Currents array\n currents_Jy = af.flat(Jy)\n\n af.eval(index_list_x_Jx, index_list_y_Jx)\n af.eval(index_list_x_Jy, index_list_y_Jy)\n af.eval(currents_Jx, currents_Jy)\n\n\n return index_list_x_Jx, index_list_y_Jx, currents_Jx,\\\n index_list_x_Jy, index_list_y_Jy, currents_Jy", "def __init__(self, M, rat):\n self.M = M\n xc0, _ = np.polynomial.chebyshev.chebgauss(M-0)\n xc1, _ = np.polynomial.chebyshev.chebgauss(M-1)\n xc2, _ = np.polynomial.chebyshev.chebgauss(M-2)\n # vandermonde and inverse vandermonde matrices\n self.V0 = np.polynomial.chebyshev.chebvander(xc0, M-1)\n self.V1 = np.polynomial.chebyshev.chebvander(xc1, M-2)\n self.V2 = np.polynomial.chebyshev.chebvander(xc2, M-3)\n self.VI0 = np.linalg.inv(self.V0)\n self.VI1 = np.linalg.inv(self.V1)\n self.VI2 = np.linalg.inv(self.V2)\n # differentiation matrices\n DC01 = np.polynomial.chebyshev.chebder(np.eye(M-0)) / rat\n DC12 = np.polynomial.chebyshev.chebder(np.eye(M-1)) / rat\n DC00 = np.row_stack([DC01, np.zeros(M)])\n self.D00 = self.V0.dot(DC00.dot(self.VI0))\n self.D01 = self.V1.dot(DC01.dot(self.VI0))\n self.D12 = self.V2.dot(DC12.dot(self.VI1))\n # boundary condition operators\n self.ibc_dirichlet = np.polynomial.chebyshev.chebvander(1, M-1).dot(self.VI0)\n self.obc_dirichlet = np.polynomial.chebyshev.chebvander(-1, M-1).dot(self.VI0)\n self.ibc_neumann = self.ibc_dirichlet.dot(self.D00)\n self.obc_neumann = self.obc_dirichlet.dot(self.D00)\n # rank reduction operators\n temp = np.zeros([M-1, M-0], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R01 = self.V1.dot(temp.dot(self.VI0))\n temp = np.zeros([M-2, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R12 = self.V2.dot(temp.dot(self.VI1))\n self.R02 = self.R12.dot(self.R01)\n # get poof operator from M-1 --> M\n temp = np.zeros([M, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.P10 = self.V0.dot(temp.dot(self.VI1))", "def exhaust(self):\n svr = self.svr\n for it, fluid in enumerate(svr.fluids):\n assert np.allclose(svr.soln[svr.blk.shclgrp==it,0], fluid.rho)\n vel = svr.velocities[it]\n for idim in range(svr.ndim):\n val = fluid.rho * vel[idim]\n assert np.allclose(svr.soln[svr.blk.shclgrp==it,idim+1], val)", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def f1_part_i(x,m_ind):\n\n #f = max(2.0*rho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1] ,2.0*rho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1])\n tmp1 = 2.0*rho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1]\n tmp2 = 2.0*rho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1]\n 
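\n    # (Editorial note.) tmp1 and tmp2 are the two affine pieces of\n    # f = max(2*rho1(x) - b_i, 2*rho2(x) + b_i), with b_i = cfg.a[m_ind, cfg.nfea-1];\n    # the branches below record which piece attains the maximum in cfg.alpha1 so a\n    # valid subgradient of the max can be chosen later (2 marks a tie).\n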
\n # checking absolute value of rho-b_i = rho1-rho2-b_i\n #if (tmp1-tmp2 > cfg.a[m_ind,cfg.nfea-1]):\n # cfg.alpha[m_ind] = 1.0\n #if (tmp1-tmp2 == cfg.a[m_ind,cfg.nfea-1]):\n # cfg.alpha[m_ind] = 0.5\n #else:\n # cfg.alpha[m_ind] = 0.0\n \n # checking maximum used in rho1 \n if (tmp1 > tmp2):\n f = tmp1\n cfg.alpha1[m_ind] = 1 \n elif (tmp1 < tmp2):\n f = tmp2\n cfg.alpha1[m_ind] = 0 \n else:\n f = tmp2\n cfg.alpha1[m_ind] = 2 \n \n return f", "def MiyamotoNagaiAccel(self, M, rd, r):\n R = np.sqrt(r[0]**2 + r[1]**2) #Finding magnitude of x and y compnets\n zd = rd/5. #Calculating \"zd\"\n B = rd + np.sqrt(r[2]**2 + zd**2) #Calclating \"B\"\n zstuff = 1/np.sqrt(r[2]**2 + zd**2) #Calculating stuff that only appears in z componet\n MNa = -self.G*M/(R**2+B**2)**1.5 * r * np.array([1,1,zstuff]) #Putting it all together\n\n return MNa", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! 
RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def deposit_J_gpu(x, y, z, w,\r\n ux, uy, uz, inv_gamma,\r\n invdz, zmin, Nz,\r\n invdr, rmin, Nr,\r\n J0, J1,\r\n J2, J3,\r\n cell_idx, prefix_sum):\r\n # Get the 1D CUDA grid\r\n i = cuda.grid(1)\r\n # Deposit the field per cell in parallel (for threads < number of cells)\r\n if i < prefix_sum.shape[0]:\r\n # Calculate the cell index in 2D from the 1D threadIdx\r\n iz = int(i / Nr)\r\n ir = int(i - iz * Nr)\r\n # Calculate the inclusive offset for the current cell\r\n # It represents the number of particles contained in all other cells\r\n # with an index smaller than i + the total number of particles in the\r\n # current cell (inclusive).\r\n incl_offset = np.int32(prefix_sum[i])\r\n # Calculate the frequency per cell from the offset and the previous\r\n # offset (prefix_sum[i-1]).\r\n if i > 0:\r\n frequency_per_cell = np.int32(incl_offset - prefix_sum[i - 1])\r\n if i == 0:\r\n frequency_per_cell = np.int32(incl_offset)\r\n # Initialize the local field value for\r\n # all four possible deposition directions\r\n # Mode 0, 1 for r, t, z\r\n # 1 : lower in r, lower in z\r\n # 2 : lower in r, upper in z\r\n # 3 : upper in r, lower in z\r\n # 4 : upper in r, upper in z\r\n Jr1_m0 = 0. + 0.j\r\n Jr2_m0 = 0. + 0.j\r\n Jr3_m0 = 0. + 0.j\r\n Jr4_m0 = 0. + 0.j\r\n # -------------\r\n Jr1_m1 = 0. + 0.j\r\n Jr2_m1 = 0. + 0.j\r\n Jr3_m1 = 0. + 0.j\r\n Jr4_m1 = 0. + 0.j\r\n # -------------\r\n Jt1_m0 = 0. + 0.j\r\n Jt2_m0 = 0. + 0.j\r\n Jt3_m0 = 0. + 0.j\r\n Jt4_m0 = 0. + 0.j\r\n # -------------\r\n Jt1_m1 = 0. + 0.j\r\n Jt2_m1 = 0. + 0.j\r\n Jt3_m1 = 0. + 0.j\r\n Jt4_m1 = 0. + 0.j\r\n # -------------\r\n Jz1_m0 = 0. + 0.j\r\n Jz2_m0 = 0. + 0.j\r\n Jz3_m0 = 0. + 0.j\r\n Jz4_m0 = 0. + 0.j\r\n # -------------\r\n Jz1_m1 = 0. + 0.j\r\n Jz2_m1 = 0. + 0.j\r\n Jz3_m1 = 0. + 0.j\r\n Jz4_m1 = 0. + 0.j\r\n # Loop over the number of particles per cell\r\n for j in range(frequency_per_cell):\r\n # Get the particle index\r\n # ----------------------\r\n # (Since incl_offset is a cumulative sum of particle number,\r\n # and since python index starts at 0, one has to add -1)\r\n ptcl_idx = incl_offset - 1 - j\r\n\r\n # Preliminary arrays for the cylindrical conversion\r\n # --------------------------------------------\r\n # Position\r\n xj = x[ptcl_idx]\r\n yj = y[ptcl_idx]\r\n zj = z[ptcl_idx]\r\n # Velocity\r\n uxj = ux[ptcl_idx]\r\n uyj = uy[ptcl_idx]\r\n uzj = uz[ptcl_idx]\r\n # Inverse gamma\r\n inv_gammaj = inv_gamma[ptcl_idx]\r\n # Weights\r\n wj = w[ptcl_idx]\r\n\r\n # Cylindrical conversion\r\n rj = math.sqrt(xj**2 + yj**2)\r\n # Avoid division by 0.\r\n if (rj != 0.):\r\n invr = 1. 
/ rj\r\n cos = xj * invr # Cosine\r\n sin = yj * invr # Sine\r\n else:\r\n cos = 1.\r\n sin = 0.\r\n exptheta_m0 = 1.\r\n exptheta_m1 = cos + 1.j * sin\r\n\r\n # Get linear weights for the deposition\r\n # --------------------------------------------\r\n # Positions of the particles, in the cell unit\r\n r_cell = invdr * (rj - rmin) - 0.5\r\n z_cell = invdz * (zj - zmin) - 0.5\r\n # Original index of the uppper and lower cell\r\n # in r and z\r\n ir_lower = int(math.floor(r_cell))\r\n ir_upper = ir_lower + 1\r\n iz_lower = int(math.floor(z_cell))\r\n iz_upper = iz_lower + 1\r\n # Linear weight\r\n Sr_lower = ir_upper - r_cell\r\n Sr_upper = r_cell - ir_lower\r\n Sz_lower = iz_upper - z_cell\r\n Sz_upper = z_cell - iz_lower\r\n # Set guard weights to zero\r\n Sr_guard = 0.\r\n\r\n # Treat the boundary conditions\r\n # --------------------------------------------\r\n # guard cells in lower r\r\n if ir_lower < 0:\r\n Sr_guard = Sr_lower\r\n Sr_lower = 0.\r\n ir_lower = 0\r\n # absorbing in upper r\r\n if ir_lower > Nr - 1:\r\n ir_lower = Nr - 1\r\n if ir_upper > Nr - 1:\r\n ir_upper = Nr - 1\r\n # periodic boundaries in z\r\n # lower z boundaries\r\n if iz_lower < 0:\r\n iz_lower += Nz\r\n if iz_upper < 0:\r\n iz_upper += Nz\r\n # upper z boundaries\r\n if iz_lower > Nz - 1:\r\n iz_lower -= Nz\r\n if iz_upper > Nz - 1:\r\n iz_upper -= Nz\r\n\r\n # Calculate the currents\r\n # --------------------------------------------\r\n # Mode 0\r\n Jr_m0 = wj * c * inv_gammaj * (cos * uxj + sin * uyj) * exptheta_m0\r\n Jt_m0 = wj * c * inv_gammaj * (cos * uyj - sin * uxj) * exptheta_m0\r\n Jz_m0 = wj * c * inv_gammaj * uzj * exptheta_m0\r\n # Mode 1\r\n Jr_m1 = wj * c * inv_gammaj * (cos * uxj + sin * uyj) * exptheta_m1\r\n Jt_m1 = wj * c * inv_gammaj * (cos * uyj - sin * uxj) * exptheta_m1\r\n Jz_m1 = wj * c * inv_gammaj * uzj * exptheta_m1\r\n\r\n # Caculate the weighted currents for each\r\n # of the four possible direction\r\n # --------------------------------------------\r\n if ir_lower == ir_upper:\r\n # In the case that ir_lower and ir_upper are equal,\r\n # the current is added only to the array corresponding\r\n # to ir_lower.\r\n # (This is the case for the boundaries in r)\r\n Jr1_m0 += Sz_lower * Sr_lower * Jr_m0\r\n Jr1_m0 += Sz_lower * Sr_upper * Jr_m0\r\n Jr3_m0 += Sz_upper * Sr_lower * Jr_m0\r\n Jr3_m0 += Sz_upper * Sr_upper * Jr_m0\r\n # -------------------------------\r\n Jr1_m1 += Sz_lower * Sr_lower * Jr_m1\r\n Jr1_m1 += Sz_lower * Sr_upper * Jr_m1\r\n Jr3_m1 += Sz_upper * Sr_lower * Jr_m1\r\n Jr3_m1 += Sz_upper * Sr_upper * Jr_m1\r\n # -------------------------------\r\n Jt1_m0 += Sz_lower * Sr_lower * Jt_m0\r\n Jt1_m0 += Sz_lower * Sr_upper * Jt_m0\r\n Jt3_m0 += Sz_upper * Sr_lower * Jt_m0\r\n Jt3_m0 += Sz_upper * Sr_upper * Jt_m0\r\n # -------------------------------\r\n Jt1_m1 += Sz_lower * Sr_lower * Jt_m1\r\n Jt1_m1 += Sz_lower * Sr_upper * Jt_m1\r\n Jt3_m1 += Sz_upper * Sr_lower * Jt_m1\r\n Jt3_m1 += Sz_upper * Sr_upper * Jt_m1\r\n # -------------------------------\r\n Jz1_m0 += Sz_lower * Sr_lower * Jz_m0\r\n Jz1_m0 += Sz_lower * Sr_upper * Jz_m0\r\n Jz3_m0 += Sz_upper * Sr_lower * Jz_m0\r\n Jz3_m0 += Sz_upper * Sr_upper * Jz_m0\r\n # -------------------------------\r\n Jz1_m1 += Sz_lower * Sr_lower * Jz_m1\r\n Jz1_m1 += Sz_lower * Sr_upper * Jz_m1\r\n Jz3_m1 += Sz_upper * Sr_lower * Jz_m1\r\n Jz3_m1 += Sz_upper * Sr_upper * Jz_m1\r\n # -------------------------------\r\n if ir_lower != ir_upper:\r\n # In the case that ir_lower and ir_upper are different,\r\n # add the 
current to the four arrays according to\r\n # the direction.\r\n Jr1_m0 += Sz_lower * Sr_lower * Jr_m0\r\n Jr2_m0 += Sz_lower * Sr_upper * Jr_m0\r\n Jr3_m0 += Sz_upper * Sr_lower * Jr_m0\r\n Jr4_m0 += Sz_upper * Sr_upper * Jr_m0\r\n # -------------------------------\r\n Jr1_m1 += Sz_lower * Sr_lower * Jr_m1\r\n Jr2_m1 += Sz_lower * Sr_upper * Jr_m1\r\n Jr3_m1 += Sz_upper * Sr_lower * Jr_m1\r\n Jr4_m1 += Sz_upper * Sr_upper * Jr_m1\r\n # -------------------------------\r\n Jt1_m0 += Sz_lower * Sr_lower * Jt_m0\r\n Jt2_m0 += Sz_lower * Sr_upper * Jt_m0\r\n Jt3_m0 += Sz_upper * Sr_lower * Jt_m0\r\n Jt4_m0 += Sz_upper * Sr_upper * Jt_m0\r\n # -------------------------------\r\n Jt1_m1 += Sz_lower * Sr_lower * Jt_m1\r\n Jt2_m1 += Sz_lower * Sr_upper * Jt_m1\r\n Jt3_m1 += Sz_upper * Sr_lower * Jt_m1\r\n Jt4_m1 += Sz_upper * Sr_upper * Jt_m1\r\n # -------------------------------\r\n Jz1_m0 += Sz_lower * Sr_lower * Jz_m0\r\n Jz2_m0 += Sz_lower * Sr_upper * Jz_m0\r\n Jz3_m0 += Sz_upper * Sr_lower * Jz_m0\r\n Jz4_m0 += Sz_upper * Sr_upper * Jz_m0\r\n # -------------------------------\r\n Jz1_m1 += Sz_lower * Sr_lower * Jz_m1\r\n Jz2_m1 += Sz_lower * Sr_upper * Jz_m1\r\n Jz3_m1 += Sz_upper * Sr_lower * Jz_m1\r\n Jz4_m1 += Sz_upper * Sr_upper * Jz_m1\r\n # -------------------------------\r\n if ir_lower == ir_upper == 0:\r\n # Treat the guard cells.\r\n # Add the current to the guard cells\r\n # for particles that had an original\r\n # cell index < 0.\r\n Jr1_m0 += -1. * Sz_lower * Sr_guard * Jr_m0\r\n Jr3_m0 += -1. * Sz_upper * Sr_guard * Jr_m0\r\n # -----------------------------------\r\n Jr1_m1 += -1. * Sz_lower * Sr_guard * Jr_m1\r\n Jr3_m1 += -1. * Sz_upper * Sr_guard * Jr_m1\r\n # -----------------------------------\r\n Jt1_m0 += -1. * Sz_lower * Sr_guard * Jt_m0\r\n Jt3_m0 += -1. * Sz_upper * Sr_guard * Jt_m0\r\n # -----------------------------------\r\n Jt1_m1 += -1. * Sz_lower * Sr_guard * Jt_m1\r\n Jt3_m1 += -1. * Sz_upper * Sr_guard * Jt_m1\r\n # -----------------------------------\r\n Jz1_m0 += -1. * Sz_lower * Sr_guard * Jz_m0\r\n Jz3_m0 += -1. * Sz_upper * Sr_guard * Jz_m0\r\n # -----------------------------------\r\n Jz1_m1 += -1. * Sz_lower * Sr_guard * Jz_m1\r\n Jz3_m1 += -1. 
* Sz_upper * Sr_guard * Jz_m1\r\n # Write the calculated field values to\r\n # the field arrays defined on the interpolation grid\r\n J0[iz, ir, 0] = Jr1_m0\r\n J0[iz, ir, 1] = Jr1_m1\r\n J0[iz, ir, 2] = Jt1_m0\r\n J0[iz, ir, 3] = Jt1_m1\r\n J0[iz, ir, 4] = Jz1_m0\r\n J0[iz, ir, 5] = Jz1_m1\r\n # --------------------\r\n J1[iz, ir, 0] = Jr2_m0\r\n J1[iz, ir, 1] = Jr2_m1\r\n J1[iz, ir, 2] = Jt2_m0\r\n J1[iz, ir, 3] = Jt2_m1\r\n J1[iz, ir, 4] = Jz2_m0\r\n J1[iz, ir, 5] = Jz2_m1\r\n # --------------------\r\n J2[iz, ir, 0] = Jr3_m0\r\n J2[iz, ir, 1] = Jr3_m1\r\n J2[iz, ir, 2] = Jt3_m0\r\n J2[iz, ir, 3] = Jt3_m1\r\n J2[iz, ir, 4] = Jz3_m0\r\n J2[iz, ir, 5] = Jz3_m1\r\n # --------------------\r\n J3[iz, ir, 0] = Jr4_m0\r\n J3[iz, ir, 1] = Jr4_m1\r\n J3[iz, ir, 2] = Jt4_m0\r\n J3[iz, ir, 3] = Jt4_m1\r\n J3[iz, ir, 4] = Jz4_m0\r\n J3[iz, ir, 5] = Jz4_m1", "def equiangulate(self,x,mask):\n\n timeout = 100\n k = 0\n while (not mask.all())and(k<timeout):\n\n changed_tris,j = np.nonzero(~mask)\n chosen_cell = changed_tris[0]\n cell_mask = np.zeros(3,dtype=np.bool)\n cell_mask[j[0]] = True\n chosen_opposite_cell = self.v_neighbours[chosen_cell,cell_mask][0]\n\n\n cells = np.roll(self.tris[chosen_cell],-j[0])\n opposite_cells = self.tris[chosen_opposite_cell]\n opposite_cells = np.roll(opposite_cells, - self.k2s[chosen_cell,cell_mask])\n\n\n self.tris[chosen_cell] = cells[0], opposite_cells[0],cells[2]\n self.tris[chosen_opposite_cell] = opposite_cells[0],cells[0], opposite_cells[2]\n\n self.Angles[[chosen_cell,chosen_opposite_cell]] = tri_angles_periodic(x, self.tris[[chosen_cell,chosen_opposite_cell]], self.L)\n # self.Angles = tri_angles_periodic(x,self.tris,self.L)\n self.Cents = x[self.tris]\n self.vs = self.get_vertex_periodic()\n\n\n modify_neighbours = np.concatenate([self.v_neighbours[chosen_cell],self.v_neighbours[chosen_opposite_cell]])\n modify_neighbours.sort()\n self.v_neighbours[modify_neighbours] = -1\n\n\n n_neigh = get_neighbours(self.tris,self.v_neighbours,Range = modify_neighbours)\n self.v_neighbours = n_neigh\n self.neighbours = self.vs[n_neigh]\n\n self.k2s = get_k2(self.tris, self.v_neighbours)\n if (self.k2s>=3).sum()!=0:\n self._triangulate_periodic(x)\n self.k2s = get_k2(self.tris, self.v_neighbours)\n mask[:] = True\n else:\n mask = ((self.Angles[self.v_neighbours, self.k2s] + self.Angles) < np.pi)\n k+=1\n if k == timeout:\n self._triangulate_periodic(x)\n self.k2s = get_k2(self.tris, self.v_neighbours)", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. 
This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def slepian(m, jres, kt=5):\n u = [[0]*kt]*kt\n # output table for the dpss (digital prolate spheroidal sequence)\n # these sequences give hte main lobe of the maximal energy concentration of the Slepian function\n eps = 1e-10\n m2 = 2*m\n (dg, dgg, gam) = ([0]*m2,[0]*m2),[0]*m2))\n (sup, sub) = ([0]*(m2-1), [0](m2-1))\n sw = 2*np.sqrt(np.sin(jres(np.pi/m2)))\n dg[0] = .25*(2*m2+sw*np.sqrt(m2-1)-1) # set up diagonal 
matrix\n for i in range(1, m2+1):\n dg[i] = .25*(s2*np.sqrt(m2-1-2*1)+(2*(m2-i)-1)*(2*i+1))\n sub[i-i] = sub[i-1] = -t*(m2-i)/2\n xx = 0.10859 - .068762/jres + 1.5692*jres # guess eigenvalue\n xold = xx + .47276 + .20273/jres - 2.1387*jres\n for k in range(0, kt+1):\n for i in range(0, 21): # loop over iterations of Newton's method\n pp = 1\n p = dg[0] - xx\n dd = 0\n d = -1\n for j in range(1, m2+1): # recurrence evaluates polynomial and derivative\n ppp = pp\n pp = p\n ddd = dd\n dd = d\n p = p*(dg[j] - xx) - ppp*np.sqrt(sup[j-1])\n d = -pp + dd(dg[j] - xx) - ddd*np.sqrt(sup[j-1])\n if abs(p) > 1e30:\n renorm(-100)\n elif abs(p) <= 1e30:\n renorm(100)\n xnew = xx - p/d\n if abs(xx-xnew) < eps*abs(xnew):\n break\n xx = xnew\n xx = xnew - (xold - xnew)\n xold = xnew\n for i in range(0, m2+1):\n dgg[i] = dg[i] - xnew # subtract eigenvalue from matrix diagonal\n nl = m2/3\n ssup = sup[nl] # set one component and prepare for tridiagonal solution.\n ssub = sub[nl-1]\n u[0] = sup[nl] = sub[nl -1] = 0\n bet = dgg[0]\n for i in range(1, m2+1): # tridagonal solution\n gam[i] = sup[i-1]/bet\n bet = dgg[i] - subp[i-1]*gam[i]\n if i == nl:\n u[i] = 0\n else:\n u[i] = -sub[i-1]*u[i-1]/bet\n for i in range(m2-2, -1, -1):\n sup[nl] = ssup # restore saved values\n sub[nl-1] = ssub\n sumvalue = 0\n for i in range(0, m2+1):\n if u[3] > 0:\n sumvalue = np.sqrt(sumvalue)\n else:\n sumvalue = -np.sqrt(sumvalue)\n for i in range(0, m2+1):\n u[i] /= sumvalue\n return u", "def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)", "def __call__(self):\n\n if self.indices is []:\n return\n\n\n #-----------------------------------------\n # Here is where the important formula is applied\n #----------------------------------------\n if self.indices is None:\n height = self.stage_c - self.elev_c\n self.friction_c[:] = self.friction(height)\n else:\n ind = self.indices\n height = self.stage_c[ind] - self.elev_c[ind]\n self.friction_c[ind] = self.friction(height)", "def calculate_coefficients(self):\n for i in range(0, self.nz):\n zno = i * self.dz\n 
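\n            # (Editorial note.) zno is the axial coordinate of grid row i; it is\n            # stored in self.z below and passed to internal_radius_function, which\n            # needs the axial position in addition to the angular coordinate gama.\n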
self.z[0][i] = zno\n plot_eccentricity_error = False\n position = -1\n for j in range(0, self.ntheta):\n # fmt: off\n self.gama[i][j] = j * self.dtheta + (np.pi - self.beta)\n [radius_external, self.xre[i][j], self.yre[i][j]] = \\\n self.external_radius_function(self.gama[i][j])\n [radius_internal, self.xri[i][j], self.yri[i][j]] = \\\n self.internal_radius_function(zno, self.gama[i][j])\n self.re[i][j] = radius_external\n self.ri[i][j] = radius_internal\n\n w = self.omega * self.ri[i][j]\n\n k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *\n (np.log(self.ri[i][j]) - 1 / 2)) / (self.ri[i][j] ** 2 - self.re[i][j] ** 2)\n\n self.c1[i][j] = (1 / (4 * self.viscosity)) * ((self.re[i][j] ** 2 * np.log(self.re[i][j]) -\n self.ri[i][j] ** 2 * np.log(self.ri[i][j]) +\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) *\n (k - 1)) - 2 * self.re[i][j] ** 2 * (\n (np.log(self.re[i][j]) + k - 1 / 2) * np.log(\n self.re[i][j] / self.ri[i][j])))\n\n self.c2[i][j] = (- self.ri[i][j] ** 2) / (8 * self.viscosity) * \\\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2 -\n (self.re[i][j] ** 4 - self.ri[i][j] ** 4) /\n (2 * self.ri[i][j] ** 2)) +\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2) /\n (self.ri[i][j] ** 2 *\n np.log(self.re[i][j] / self.ri[i][j]))) *\n (self.re[i][j] ** 2 * np.log(self.re[i][j] / self.ri[i][j]) -\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) / 2))\n\n self.c0w[i][j] = (- w * self.ri[i][j] *\n (np.log(self.re[i][j] / self.ri[i][j]) *\n (1 + (self.ri[i][j] ** 2) / (self.re[i][j] ** 2 - self.ri[i][j] ** 2)) - 1 / 2))\n # fmt: on\n if not plot_eccentricity_error:\n if abs(self.xri[i][j]) > abs(self.xre[i][j]) or abs(\n self.yri[i][j]\n ) > abs(self.yre[i][j]):\n plot_eccentricity_error = True\n position = i\n if plot_eccentricity_error:\n self.plot_eccentricity(position)\n sys.exit(\n \"Error: The given parameters create a rotor that is not inside the stator. 
\"\n \"Check the plotted figure and fix accordingly.\"\n )", "def _c_numeric(self, rij):\n radial_fun = np.zeros((self.lmax+1, self.nmax))\n radial_fun[0,1] = 1.0\n\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb \n for n in range(1, self.nmax+1):\n argbess = 2*alpha*rb[n-1]*rij\n ep = np.exp(-alpha*(rij + rb[n-1])**2)\n em = np.exp(-alpha*(rij - rb[n-1])**2)\n #In the loops below, msb prefix refers to modified spherical bessel.\n for l in range(self.lmax+1):\n if l == 0:\n if argbess == 0.0:\n msb_fi_ki_l = np.exp(-alpha*(rb[n-1]**2 + rij**2))\n else:\n #msb_fi_ki_lm = cosh(arg_bess)/arg_bess\n #msb_fi_ki_l = sinh(arg_bess)/arg_bess\n msb_fi_ki_lm = 0.5 * (em + ep) / argbess\n msb_fi_ki_l = 0.5 * (em - ep) / argbess\n else:\n if argbess == 0.0:\n msb_fi_ki_l = 0.0\n else:\n msb_fi_ki_lmm = msb_fi_ki_lm\n msb_fi_ki_lm = msb_fi_ki_l\n msb_fi_ki_l = msb_fi_ki_lmm-(2*l-1)*msb_fi_ki_lm/argbess\n\n radial_fun[l,n-1] = msb_fi_ki_l #* rb[n-1]\n fc = fcut(rij, self.rcut, self.trans_width)\n return np.dot(radial_fun, self.transformbasis)*fc", "def __init__(self, mesh, bndry, interface, dt, theta, v_max, lambda_s, mu_s, rho_s, \n mu_f, rho_f, result, *args, **kwargs):\n\n self.mesh = mesh\n self.dt = Constant(dt)\n self.theta = theta\n self.t = 0.0\n self.v_max = v_max\n\n self.mu_f = mu_f\n self.rho_f = rho_f\n self.lambda_s = lambda_s\n self.mu_s = mu_s\n self.rho_s = rho_s\n \n self.bndry = bndry\n self.interface = interface\n\n # bounding box tree\n self.bb = BoundingBoxTree()\n self.bb.build(self.mesh)\n\n # Define finite elements\n eV = VectorElement(\"CG\", mesh.ufl_cell(), 2)\t\t# velocity element\n eB = VectorElement(\"Bubble\", mesh.ufl_cell(), mesh.geometry().dim()+1) # Bubble element\n eU = VectorElement(\"CG\", mesh.ufl_cell(), 2)\t\t# displacement element\n eP = FiniteElement(\"DG\", mesh.ufl_cell(), 1)\t\t# pressure element\n\n eW = MixedElement([eV, eB, eU, eB, eP]) # final mixed element\n W = FunctionSpace(self.mesh, eW) # mixed space\n self.W = W\n self.V = FunctionSpace(self.mesh, eV)\n\n # Set boundary conditions\n self.v_in = Expression((\"t<2.0? 
0.5*(1.0 - cos(0.5*pi*t))*v_max*4/(gW*gW)*(x[1]*(gW - x[1])): \\\n v_max*4/(gW*gW)*(x[1]*(gW - x[1]))\", \"0.0\"),\n degree = 2, v_max = Constant(self.v_max), gW = Constant(gW), t = self.t)\n\n #info(\"Expression set.\")\n bc_v_in = DirichletBC(self.W.sub(0), self.v_in, bndry, _INFLOW)\n bc_v_walls = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _WALLS)\n bc_v_circle = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _CIRCLE)\n bc_u_in = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _INFLOW)\n bc_u_circle = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _CIRCLE)\n bc_u_walls = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _WALLS)\n bc_u_out = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _OUTFLOW)\n self.bcs = [bc_v_in, bc_v_walls, bc_v_circle, bc_u_in, bc_u_walls, bc_u_circle, bc_u_out]\n\n #info(\"Mesh BC.\")\n bc_mesh = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), interface, _FSI)\n self.bcs_mesh = [bc_mesh]\n\n\n #info(\"Normal and Circumradius.\")\n self.n = FacetNormal(self.mesh)\n self.h = Circumradius(self.mesh)\n I = Identity(self.W.mesh().geometry().dim())\n\n # Define functions\n self.w = Function(self.W) # solution to current time step\n self.w0 = Function(self.W) # solution from previous time step\n\n (v__, bv_, u__, bu_, p_) = TestFunctions(self.W)\n\n # sum bubble elements with corresponding Lagrange elements\n v_ = v__ + bv_\n u_ = u__ + bu_\n (v, bv, u, bu, self.p) = split(self.w)\n self.v = v + bv\n self.u = u + bu\n (v0, bv0, u0, bu0, self.p0) = split(self.w0)\n self.v0 = v0 + bv0\n self.u0 = u0 + bu0\n\n\n # define deformation gradient, Jacobian\n self.FF = I + grad(self.u)\n self.FF0 = I + grad(self.u0)\n self.JJ = det(self.FF)\n self.JJ0 = det(self.FF0)\n\n # write ALE mesh movement \n self.gamma = 9.0/8.0\n h = CellVolume(self.mesh)**(self.gamma)\n E = Constant(1.0)\n\n E_mesh = E/h\n nu_mesh = Constant(-0.02)\n\n mu_mesh = E_mesh/(2*(1.0+nu_mesh))\n lambda_mesh = (nu_mesh*E_mesh)/((1+nu_mesh)*(1-2*nu_mesh))\n\n F_mesh = inner(mu_mesh*2*sym(grad(self.u)), grad(u_))*dx(0) \\\n + lambda_mesh*inner(div(self.u), div(u_))*dx(0)\n\n\n # define referential Grad and Div shortcuts\n def Grad(f, F): return dot( grad(f), inv(F) )\n def Div(f, F): return tr( Grad(f, F) )\n\n # approximate time derivatives\n du = (1.0/self.dt)*(self.u - self.u0)\n dv = (1.0/self.dt)*(self.v - self.v0)\n\n # compute velocuty part of Cauchy stress tensor for fluid\n self.T_f = -self.p*I + 2*self.mu_f*sym(Grad(self.v, self.FF))\n self.T_f0 = -self.p*I + 2*self.mu_f*sym(Grad(self.v0, self.FF0))\n\n # Compute 1st Piola-Kirhhoff tensro for fluid \n # - for computing surface integrals for forces in postprocessing \n self.S_f = self.JJ *self.T_f*inv(self.FF).T\n \n # write equations for fluid\n a_fluid = inner(self.T_f , Grad(v_, self.FF))*self.JJ*dx(0) \\\n - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \\\n + inner(self.rho_f*Grad(self.v, self.FF )*(self.v - du), v_)*self.JJ*dx(0)\n a_fluid0 = inner(self.T_f0, Grad(v_, self.FF0))*self.JJ0*dx(0) \\\n - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \\\n + inner(self.rho_f*Grad(self.v0, self.FF0)*(self.v0 - du), v_)*self.JJ0*dx(0)\n\n b_fluid = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)\n b_fluid0 = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)\n\n self.F_fluid = (self.theta*self.JJ+(1.0 - self.theta)*self.JJ0)*self.rho_f*inner(dv, v_)*dx(0)\\\n + self.theta*(a_fluid + b_fluid) + (1.0 - self.theta)*(a_fluid0 + b_fluid0) \\\n + F_mesh\n\n # compute 1st Piola-Kirchhoff tensor for solid (St. 
Vennant - Kirchhoff model)\n B_s = self.FF.T *self.FF\n B_s0 = self.FF0.T*self.FF0\n S_s = self.FF *(0.5*self.lambda_s*tr(B_s - I)*I + self.mu_s*(B_s - I))\n S_s0 = self.FF0*(0.5*self.lambda_s*tr(B_s0 - I)*I + self.mu_s*(B_s0 - I))\n\n # write equation for solid\n alpha = Constant(1.0) # Constant(1e10) #\n self.F_solid = rho_s*inner(dv, v_)*dx(1) \\\n + self.theta*inner(S_s , grad(v_))*dx(1) + (1.0 - self.theta)*inner(S_s0, grad(v_))*dx(1) \\\n + alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)\n\n\n dF_solid = derivative(self.F_solid, self.w)\n dF_fluid = derivative(self.F_fluid, self.w)\n\n self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)\n self.solver = NewtonSolver()\n\n # configure solver parameters\n self.solver.parameters['relative_tolerance'] = 1e-6\n self.solver.parameters['maximum_iterations'] = 15\n self.solver.parameters['linear_solver'] = 'mumps'\n\n # create files for saving\n if my_rank == 0:\n if not os.path.exists(result):\n os.makedirs(result)\n self.vfile = XDMFFile(\"%s/velocity.xdmf\" % result)\n self.ufile = XDMFFile(\"%s/displacement.xdmf\" % result)\n self.pfile = XDMFFile(\"%s/pressure.xdmf\" % result)\n self.sfile = XDMFFile(\"%s/stress.xdmf\" % result)\n self.vfile.parameters[\"flush_output\"] = True\n self.ufile.parameters[\"flush_output\"] = True\n self.pfile.parameters[\"flush_output\"] = True\n self.sfile.parameters[\"flush_output\"] = True\n with open(result+'/data.csv', 'w') as data_file:\n writer = csv.writer(data_file, delimiter=';', lineterminator='\\n')\n writer.writerow(['time', 'mean pressure on outflow', 'pressure_jump', \n 'x-coordinate of end of beam', 'y-coordinate of end of beam',\n 'pressure difference', \n 'drag_circle', 'drag_fluid', 'drag_solid', 'drag_fullfluid',\n 'lift_circle', 'lift_fluid', 'lift_solid', 'lift_fullfluid'])", "def set_ic(self, problem, eos):\n i_min = self.i_min\n j_min = self.j_min\n k_min = self.k_min\n i_max = self.i_max \n j_max = self.j_max\n k_max = self.k_max\n if problem.type == \"RP\":\n ro_l = problem.ro_l\n ro_r = problem.ro_r\n p_l = problem.p_l\n p_r = problem.p_r\n for i in range(i_min, i_max):\n for j in range(j_min, j_max):\n for k in range(k_min, k_max):\n if problem.dir=='x':\n u_l = problem.u_l\n u_r = problem.u_r\n v_l = 0.\n w_l = 0.\n e_l = eos.gete(ro_l, p_l)\n E_l = e_l + u_l*u_l/2. + v_l*v_l/2. + w_l*w_l/2.\n v_r = 0.\n w_r = 0.\n e_r = eos.gete(ro_r, p_r)\n E_r = e_r + u_r*u_r/2. + v_r*v_r/2. + w_r*w_r/2.\n if self.x_mesh[i] < problem.q_0 and math.fabs(self.x_mesh[i]-problem.q_0)>self.dx/100.:\n self.U[i][j][k] = [ro_l, ro_l*u_l, ro_l*v_l, ro_l*w_l, ro_l*E_l]\n else:\n self.U[i][j][k] = [ro_r, ro_r*u_r, ro_r*v_r, ro_r*w_r, ro_r*E_r]\n elif problem.dir == 'y':\n u_l = 0.\n v_l = problem.u_l\n w_l = 0.\n e_l = eos.gete(ro_l, p_l)\n E_l = e_l + u_l * u_l / 2. + v_l * v_l / 2. + w_l * w_l / 2.\n u_r = 0.\n v_r = problem.u_r\n w_r = 0.\n e_r = eos.gete(ro_r, p_r)\n E_r = e_r + u_r * u_r / 2. + v_r * v_r / 2. + w_r * w_r / 2.\n if self.y_mesh[j] < problem.q_0 and math.fabs(self.y_mesh[j] - problem.q_0) > self.dy / 100.:\n self.U[i][j][k] = [ro_l, ro_l * u_l, ro_l * v_l, ro_l * w_l, ro_l * E_l]\n else:\n self.U[i][j][k] = [ro_r, ro_r * u_r, ro_r * v_r, ro_r * w_r, ro_r * E_r]\n elif problem.dir == 'z':\n u_l = 0.\n v_l = 0.\n w_l = problem.u_l\n e_l = eos.gete(ro_l, p_l)\n E_l = e_l + u_l * u_l / 2. + v_l * v_l / 2. + w_l * w_l / 2.\n u_r = 0.\n v_r = 0.\n w_r = problem.u_r\n e_r = eos.gete(ro_r, p_r)\n E_r = e_r + u_r * u_r / 2. 
+ v_r * v_r / 2. + w_r * w_r / 2.\n if self.z_mesh[k] < problem.q_0 and math.fabs(self.z_mesh[k] - problem.q_0) > self.dz / 100.:\n self.U[i][j][k] = [ro_l, ro_l * u_l, ro_l * v_l, ro_l * w_l, ro_l * E_l]\n else:\n self.U[i][j][k] = [ro_r, ro_r * u_r, ro_r * v_r, ro_r * w_r, ro_r * E_r]\n else:\n print(\"Error: CField.set_ic(): Sorry, only x-direction case can be considered. Bye!\")\n exit(-1)\n elif problem.type == \"RTI\":\n U = self.U\n ro_down = problem.ro_down\n ro_up = problem.ro_up\n u = 0.\n v = 0.\n w = 0.\n p_0 = problem.p_0\n g = problem.g\n q_0 = problem.q_0\n p = 0.\n for i in range(i_min, i_max):\n for j in range(j_min, j_max):\n for k in range(k_min, k_max):\n x = .5*self.dx + self.x_mesh[i]\n y = .5*self.dy + self.y_mesh[j]\n z = .5*self.dz + self.z_mesh[k]\n if problem.dir == 'x':\n q = x\n elif problem.dir == 'y':\n q = y\n else:\n q = z\n if q < q_0:\n ro = ro_down\n else:\n ro = ro_up\n p = p_0 + ro*g*(q - q_0)\n e = eos.gete(ro, p)\n E = e + .5*(0.*0. + 0.*0. + 0.*0.)\n self.U[i][j][k] = [ro, ro*u, ro*v, ro*w, ro*E]\n # Apply initial disturbance\n # Uncomment the variant you prefer\n # Yalinewich 2D disturbance\n PI = 3.14159\n w_0 = 0.0025\n for i in range(i_min, i_max):\n for j in range(j_min, j_max):\n for k in range(k_min, k_max):\n # x = self.dx * (.5 + self.x_mesh[i])\n # y = self.dy * (.5 + self.y_mesh[j])\n # z = self.dz * (.5 + self.z_mesh[k])\n x = .5 * self.dx + self.x_mesh[i]\n y = .5 * self.dy + self.y_mesh[j]\n z = .5 * self.dz + self.z_mesh[k]\n if problem.dir == 'x':\n self.U[i][j][k][3] = 0.\n self.U[i][j][k][1] = self.U[i][j][k][0]*w_0* \\\n (1. - math.cos(4.*PI*z)) * (1.-math.cos(4.*PI*x/3.))\n elif problem.dir == 'y':\n U[i][j][k][1] = 0.\n U[i][j][k][2] = U[i][j][k][0]*w_0*(1. - math.cos(4.*PI*x)) * (1.-math.cos(4.*PI*y/3.))\n elif problem.dir == 'z':\n self.U[i][j][k][2] = 0.\n self.U[i][j][k][3] = self.U[i][j][k][0]*w_0* \\\n (1. - math.cos(4.*PI*y)) * (1.-math.cos(4.*PI*z/3.))\n else:\n print(\"Error: CField.set_ic(): unknown problem type! Only 1d-PRs and 2d-RTIs allowed. 
Bye!\")\n exit(-1)\n return", "def assert_linear_elastic_block(self,simulation, top_node_nbrs, n_dim):\n total_stresses = test_helper.get_total_stress_tensor(simulation)\n total_stresses_xx = [integration_point[0,0] for element in total_stresses for integration_point in element]\n if n_dim >= 2:\n total_stresses_yy = [integration_point[1,1] for element in total_stresses for integration_point in element]\n if n_dim >= 3:\n total_stresses_zz = [integration_point[2,2] for element in total_stresses for integration_point in element]\n\n effective_stresses = test_helper.get_cauchy_stress_tensor(simulation)\n effective_stresses_xx = [integration_point[0,0] for element in effective_stresses for integration_point in element]\n if n_dim >= 2:\n effective_stresses_yy = [integration_point[1,1] for element in effective_stresses for integration_point in element]\n if n_dim >= 3:\n effective_stresses_zz = [integration_point[2,2] for element in effective_stresses for integration_point in element]\n\n displacements = test_helper.get_displacement(simulation)\n x_displacements = [displacement[0] for displacement in displacements]\n if n_dim >= 2:\n y_displacements = [displacement[1] for displacement in displacements]\n if n_dim >= 3:\n z_displacements = [displacement[2] for displacement in displacements]\n\n green_lagrange_strains = test_helper.get_green_lagrange_strain_tensor(simulation)\n green_lagrange_strains_xx = [integration_point[0,0] for element in green_lagrange_strains for integration_point in\n element]\n if n_dim == 2:\n green_lagrange_strains_yy = [integration_point[1,1] for element in green_lagrange_strains for integration_point in\n element]\n elif n_dim == 3:\n green_lagrange_strains_yy = [integration_point[1,1] for element in green_lagrange_strains for integration_point in\n element]\n green_lagrange_strains_zz = [integration_point[2,2] for element in green_lagrange_strains for integration_point in\n element]\n\n # Assert integration point information\n for idx, total_stress_xx in enumerate(total_stresses_xx):\n self.assertAlmostEqual(0.0, total_stress_xx)\n self.assertAlmostEqual(-1e4, total_stresses_yy[idx])\n if n_dim >= 3:\n self.assertAlmostEqual(0.0, total_stresses_zz[idx])\n\n self.assertAlmostEqual(0.0, effective_stresses_xx[idx])\n self.assertAlmostEqual(-1e4, effective_stresses_yy[idx])\n if n_dim >= 3:\n self.assertAlmostEqual(0.0, effective_stresses_zz[idx])\n\n self.assertAlmostEqual(0.0, green_lagrange_strains_xx[idx])\n self.assertAlmostEqual(-0.00033333, green_lagrange_strains_yy[idx])\n if n_dim >= 3:\n self.assertAlmostEqual(0.0, green_lagrange_strains_zz[idx])\n\n # Assert displacements\n for x_displacement in x_displacements:\n self.assertAlmostEqual(0.0, x_displacement)\n\n for top_node_nbr in top_node_nbrs:\n self.assertAlmostEqual(-0.00033333, y_displacements[top_node_nbr], 6)\n\n if n_dim >= 3:\n for z_displacement in z_displacements:\n self.assertAlmostEqual(0.0, z_displacement)", "def buildInterface(self, idx = 0, z_1 = 1, z_2 = 1, d = 2.5,\\\n verbose = 1, vacuum = 0, translation = None,\\\n surface = None, anchor = \"@\", ab = True):\n\n if verbose > 0:\n self.printInterfaces(idx = idx, anchor = anchor)\n\n \"\"\"Get the distance between the top atom and the top of the cell\"\"\"\n if ab:\n B1, B2 = self.getAB()\n void = B1[2, 2] - np.max(self.pos_1[:, 2] / self.base_1[2, 2] * B1[2, 2])\n else:\n void = self.base_1[2, 2] - np.max(self.pos_1[:, 2])\n d -= void\n\n \"\"\"The strained new basis\"\"\"\n F = np.zeros((3, 3))\n F[2, 2] = self.base_1[2, 2] * z_1 + 
self.base_2[2, 2] * z_2 + d\n F[0:2, 0:2] = self.cell_1[idx, :, :]\n \n \"\"\"Parameters for the alternative base if specified\"\"\"\n if ab:\n F_ab = np.zeros((3, 3))\n F_ab[:2, :2] = np.matmul(B1[:2, :2], self.rep_1[idx, :, :])\n F_ab[2, 2] = B1[2, 2] * z_1 + B2[2, 2] * z_2 + d\n\n \"\"\"The unstrained new basis\"\"\"\n D = np.zeros((3, 3))\n D[2, 2] = self.base_2[2, 2] * z_2\n D[0:2, 0:2] = self.cell_2[idx, :, :]\n\n \"\"\"Parameters for the alternative base if specified\"\"\"\n if ab:\n D_ab = np.zeros((3, 3))\n D_ab[:2, :2] = np.matmul(B2[:2, :2], self.rep_2[idx, :, :])\n D_ab[2, 2] = B2[2, 2] * z_2\n\n \"\"\"Working on interface A\"\"\"\n \"\"\"Set up the bottom interface with the correct repetitions\"\"\"\n rep_1 = np.zeros((3, 4))\n rep_1[0:2, 0:2] = self.rep_1[idx, :, :]\n rep_1[:, 2] = np.sum(rep_1, axis = 1)\n\n \"\"\"Set all hi-lo limits for the cell repetitions\"\"\"\n h = 2\n rep_1 = [rep_1[0, :].min() - h, rep_1[0, :].max() + h,\\\n rep_1[1, :].min() - h, rep_1[1, :].max() + h,\\\n 0, z_1 - 1]\n\n \"\"\"Extend the cell as spcefied\"\"\"\n pos_1_ext, spec_1, mass_1 = ut.extendCell(base = self.base_1, rep = rep_1,\\\n pos = self.pos_1.T, spec = self.spec_1,\\\n mass = self.mass_1)\n\n \"\"\"Working on interface B\"\"\" \n \"\"\"Set up the top interface with the correct repetitions and rotation\"\"\"\n rep_2 = np.zeros((3, 4))\n rep_2[0:2, 0:2] = self.rep_2[idx, :, :]\n rep_2[:, 2] = np.sum(rep_2, axis = 1)\n\n \"\"\"Set all hi-lo limits for the cell repetitions\"\"\"\n h = 2\n rep_2 = [rep_2[0, :].min() - h, rep_2[0, :].max() + h,\\\n rep_2[1, :].min() - h, rep_2[1, :].max() + h,\\\n 0, z_2 - 1]\n\n \"\"\"Extend the cell as specified\"\"\"\n pos_2_ext, spec_2, mass_2 = ut.extendCell(base = self.base_2, rep = rep_2,\\\n pos = self.pos_2.T, spec = self.spec_2,\\\n mass = self.mass_2)\n\n \"\"\"Initial rotation\"\"\"\n initRot = np.deg2rad(self.ang[idx])\n\n \"\"\"Rotate the positions pos_rot = R*pos\"\"\"\n pos_2_ext_rot = ut.rotate(pos_2_ext, initRot, verbose = verbose - 1)\n\n \"\"\"Convert to direct coordinates in the unstrained D base\"\"\"\n pos_2_d_D = np.matmul(np.linalg.inv(D), pos_2_ext_rot)\n\n \"\"\"Convert the cell back to Cartesian using the strained basis.\n But with the Z parameter as in the D cell or as the Z in the \n alternative base if specified\"\"\"\n temp_F = F.copy()\n if ab:\n temp_F[2, 2] = B2[2, 2] * z_2\n else:\n temp_F[2, 2] = D[2, 2]\n pos_2_F = np.matmul(temp_F, pos_2_d_D)\n\n \"\"\"If using an alternative base fix the hight of the bottom cell\"\"\"\n if ab:\n temp_F = F.copy()\n temp_F[2, 2] = self.base_1[2, 2] * z_1\n pos_1_d = np.matmul(np.linalg.inv(temp_F), pos_1_ext)\n temp_F[2, 2] = B1[2, 2] * z_1\n pos_1_ext = np.matmul(temp_F, pos_1_d)\n\n \"\"\"Combine the positions of the two cells\"\"\"\n pos = np.zeros((3, pos_1_ext.shape[1] + pos_2_F.shape[1]))\n pos[:, :pos_1_ext.shape[1]] = pos_1_ext\n\n \"\"\"Shift Z positions of top cell to (cell_A + d)\"\"\"\n if ab:\n height = B1[2, 2] * z_1 + d\n else:\n height = self.base_1[2, 2] * z_1 + d\n\n pos_2_F[2, :] = pos_2_F[2, :] + height\n pos[:, pos_1_ext.shape[1]:] = pos_2_F\n\n \"\"\"If a translation is specified shift (x,y) coordinates of top cell accordingly\"\"\" \n if translation is not None:\n T = ut.getTranslation(translation, surface, verbose = verbose)[0]\n cT = np.matmul(self.base_1, T)\n pos[:, pos_1_ext.shape[1]:] = pos[:, pos_1_ext.shape[1]:] + cT[:, np.newaxis]\n if verbose: \n string = \"Translation [%.2f, %.2f, %.2f] (C) or [%.2f, %.2f, %.2f] (D)\"\\\n % (cT[0], cT[1], cT[2], T[0], 
T[1], T[2])\n ut.infoPrint(string)\n\n \"\"\"Convert the entire new cell to direct coordinates, add d above as well\"\"\"\n if ab:\n F[2, 2] = B1[2, 2] * z_1 + d + B2[2, 2] * z_2 + d\n else:\n F[2, 2] += d\n pos_d = np.matmul(np.linalg.inv(F), pos)\n\n \"\"\"Change the base if an alternative base is used\"\"\"\n if ab:\n F[:2, :2] = F_ab[:2, :2]\n\n \"\"\"Remove all positions outside [0, 1)\"\"\"\n pos_d = np.round(pos_d, 8)\n F = np.round(F, 8)\n\n keep = np.all(pos_d < 1, axis = 0) * np.all(pos_d >= 0, axis = 0)\n pos_d = pos_d[:, keep]\n species = np.concatenate((spec_1, spec_2))[keep]\n mass = np.concatenate((mass_1, mass_2))[keep]\n\n \"\"\"Return to cartesian coordinates and change shape to (...,3)\"\"\"\n pos = np.matmul(F, pos_d).T\n\n \"\"\"Add vacuum if specified\"\"\"\n F[2, 2] = F[2, 2] + vacuum\n if verbose: \n string = \"Z-distance fixed (between,above): %.2f | Vacuum added (above): %.2f\"\\\n % (d + void, vacuum)\n ut.infoPrint(string)\n if ab:\n string = \"Interface constructed with an alternative base\"\n ut.infoPrint(string)\n\n return F, pos, species, mass", "def total_loc_efield(i, r1, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod, nmin_sc, nmax_sc,\n R_particle, eps_particle):\n if i == 1:\n ri = r1\n rj = r2\n j = 2\n elif i == 2:\n ri = r2\n rj = r1\n j = 1\n else:\n ri = np.array([0, 0, 0])\n rj = np.array([0, 0, 0])\n j = 0\n print('ERROR: i is out of range!')\n\n k2_eps0 = k**2 / const.epsilon0\n E0i = E0_sum(ri, k, fiber_radius, eps_out, eps_in,\n E0_mod, nmin_sc, nmax_sc, case)\n\n Gsii = gff.GF_pol(k, eps_out, eps_in, fiber_radius,\n ri, ri, nmin, nmax, kzimax)\n G0ij = gfv.GF_vac_pol(ri, rj, k)\n\n Gsij = gff.GF_pol(k, eps_out, eps_in, fiber_radius,\n ri, rj, nmin, nmax, kzimax)\n Gij = G0ij + Gsij\n\n pi = dipole_moment(i, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n pj = dipole_moment(j, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n return(E0i + k2_eps0 * (np.dot(Gij, pj) + np.dot(Gsii, pi)))", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def set_moment_of_inertia(pa):\n # no of bodies\n nb = pa.nb[0]\n # loop over all the bodies\n for i in range(nb):\n fltr = np.where(pa.body_id == i)[0]\n cm_i = pa.cm[3 * i:3 * i + 3]\n\n I = np.zeros(9)\n for j in fltr:\n # Ixx\n I[0] += pa.m[j] * (\n (pa.y[j] - 
cm_i[1])**2. + (pa.z[j] - cm_i[2])**2.)\n\n # Iyy\n I[4] += pa.m[j] * (\n (pa.x[j] - cm_i[0])**2. + (pa.z[j] - cm_i[2])**2.)\n\n # Izz\n I[8] += pa.m[j] * (\n (pa.x[j] - cm_i[0])**2. + (pa.y[j] - cm_i[1])**2.)\n\n # Ixy\n I[1] -= pa.m[j] * (pa.x[j] - cm_i[0]) * (pa.y[j] - cm_i[1])\n\n # Ixz\n I[2] -= pa.m[j] * (pa.x[j] - cm_i[0]) * (pa.z[j] - cm_i[2])\n\n # Iyz\n I[5] -= pa.m[j] * (pa.y[j] - cm_i[1]) * (pa.z[j] - cm_i[2])\n\n I[3] = I[1]\n I[6] = I[2]\n I[7] = I[5]\n\n pa.moig[9 * i:9 * i + 9] = I[:]\n\n I_inv = np.linalg.inv(I.reshape(3, 3))\n I_inv = I_inv.ravel()\n pa.mib[9 * i:9 * i + 9] = I_inv[:]
def __init__(self, temperature=298 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n super(AndersenVelocityVerletIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n kT = kB * temperature\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"p_collision\", timestep * collision_rate) # per-particle collision probability per timestep\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (computed later)\n self.addPerDofVariable(\"collision\", 0) # 1 if a collision has occurred this timestep, 0 otherwise\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Update velocities from Maxwell-Boltzmann distribution for particles that collide.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"collision\", \"step(p_collision-uniform)\") # 1 if a collision has occurred this timestep, 0 otherwise\n self.addComputePerDof(\"v\", \"(1-collision)*v + collision*sigma_v*gaussian\") # randomize velocities of particles that have collided\n\n #\n # Velocity Verlet step\n #\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()
def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary conditions are not periodic, the grid_size is doubled but particles are kept in the first quadrant so \n #that the particles cannot feel the effect of the particles close to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the particle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial positions are given, place the particles at the right places on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial momenta are given, initialize them \n if len(momentum) != 0:\n self.particles.momentum = 
momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))", "def main():\n\n # read simulation data\n filename = pathlib.Path(__file__).expanduser().resolve().parent.joinpath(\"solutions.nc\")\n sim_data, _ = read_cf(filename, [\"w\", \"hu\"])\n x = sim_data[\"x\"]\n w = sim_data[\"w\"][-1, :, :] # only keep the soln at the last time\n w = numpy.mean(w, 0) # use the average in y direction\n hu = sim_data[\"hu\"][-1, :, :]\n hu = numpy.mean(hu, 0)\n\n # get a set of analytical solution for error\n b_ana = topo(x)\n h_ana = numpy.zeros_like(x)\n C0, C1 = get_coeffs(b_ana, 4.42, 2.0, 9.81)\n for i, c1 in enumerate(C1):\n h_ana[i] = numpy.roots([1.0, c1, 0., C0])[0]\n w_ana = h_ana + b_ana\n\n # get another set of solution for plotting\n x_plot = numpy.linspace(0., 25., 1000, dtype=numpy.float64)\n b_plot = topo(x_plot)\n h_plot = numpy.zeros_like(x_plot)\n C0, C1 = get_coeffs(b_plot, 4.42, 2.0, 9.81)\n for i, c1 in enumerate(C1):\n h_plot[i] = numpy.roots([1.0, c1, 0., C0])[0]\n w_plot = h_plot + b_plot\n\n # relative L1 error\n w_err = numpy.abs((w-w_ana)/w_ana)\n\n # total volume per unit y\n vol = w.sum() * (x[1] - x[0])\n vol_ana = w_ana.sum() * (x[1] - x[0])\n print(\"Total volume per y: analytical -- {} m^2; \".format(vol_ana) +\n \"simulation -- {} m^2\".format(vol))\n\n # plot\n pyplot.figure()\n pyplot.plot(x_plot, b_plot, \"k-\", lw=4, label=\"Topography elevation (m)\")\n pyplot.plot(x_plot, w_plot, \"k-\", lw=2, label=\"Analytical solution\")\n pyplot.plot(x, w, ls='', marker='x', ms=5, alpha=0.6, label=\"Simulation solution\")\n pyplot.title(\"Subcritical flow: water level\")\n pyplot.xlabel(\"x (m)\")\n pyplot.ylabel(\"Water level (m)\")\n pyplot.grid()\n pyplot.legend()\n pyplot.savefig(\"simulation_vs_analytical_w.png\", dpi=166)\n\n pyplot.figure()\n pyplot.plot(x_plot, h_plot, \"k-\", lw=2, label=\"Analytical solution\")\n pyplot.plot(x, w-b_ana, ls='', marker='x', ms=5, alpha=0.6, label=\"Simulation solution\")\n pyplot.title(\"Subcritical flow: water depth\")\n pyplot.xlabel(\"x (m)\")\n pyplot.ylabel(\"Water depth (m)\")\n pyplot.grid()\n pyplot.legend()\n pyplot.savefig(\"simulation_vs_analytical_h.png\", dpi=166)\n\n pyplot.figure()\n pyplot.plot(x_plot, numpy.ones_like(x_plot)*4.42, \"k-\", lw=2, label=\"Analytical solution\")\n pyplot.plot(x, hu, ls='', marker='x', ms=5, alpha=0.6, label=\"Simulation solution\")\n pyplot.title(\"Subcritical flow: discharge\")\n pyplot.xlabel(\"x (m)\")\n pyplot.ylabel(\"Discharge \" r\"($q=hu$)\" \" (m)\")\n pyplot.grid()\n pyplot.legend()\n pyplot.savefig(\"simulation_vs_analytical_hu.png\", dpi=166)\n\n pyplot.figure()\n pyplot.semilogy(x, w_err, \"k-\", lw=2)\n pyplot.title(\"Subcritical flow: relative L1 error of w\")\n pyplot.xlabel(\"x (m)\")\n pyplot.ylabel(r\"$\\left|\\left(w_{simulation}-w_{analytical}\\right)/w_{analytical}\\right|$\")\n pyplot.grid()\n pyplot.savefig(\"simulation_vs_analytical_w_L1.png\", dpi=166)\n\n return 0", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def apply_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n resids['z'] = x*z + z - 4.0\n\n # Output equations need to evaluate a residual just like an explicit comp.\n resids['y'] = x + 2.0*z - unknowns['y']\n #print(x, unknowns['y'], z, resids['z'], resids['y'])", "def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n 
Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)", "def solve_cg_eps(self, params, r, sz, energy, aux):\n\n dp0_i, g2_i, itr, nsamples = aux\n\n jac = self.vmap_getder(params, r, sz, nsamples)\n #jac_T = conj_transpose(jac)\n\n f_i = jnp.real(-2 * psum(jnp.matmul(energy, jnp.conjugate(jac)), axis_name='p') / nsamples)\n g2_i = self.beta * g2_i + (1. - self.beta) * f_i**2\n g2h_i = jnp.sqrt(g2_i / (1. 
- self.beta**itr))\n\n# cg_mult = lambda v_i: jnp.real(psum(jnp.matmul(jnp.matmul(v_i, jac_T), jac), axis_name='p') / nsamples) + self.eps * (0.001 + g2h_i) * v_i\n cg_mult = lambda v_i: jnp.real(psum(jnp.matmul(jnp.matmul(jnp.conjugate(jac), v_i), jac), axis_name='p') / nsamples) + self.eps * (0.001 + g2h_i) * v_i\n\n dp_i, info = jax.scipy.sparse.linalg.cg(cg_mult, f_i, x0=dp0_i, tol=1e-5, atol=0.0, maxiter=200)\n return dp_i, g2_i", "def calc_nonlinear_rhs(self, A):\n cff = -1/(4*self.dx) \n N = self.N\n rhs = np.zeros((N,))\n alpha = self.alpha\n\n # Central difference for the interior points\n rhs[1:N-1] = cff*alpha[1:-1] * (A[2:]*A[2:] - A[0:-2]*A[0:-2])\n\n # Boundary term forward difference\n rhs[0] = 0.5*cff*alpha[0]*(A[1]*A[1] - A[0]*A[0])\n\n return rhs", "def __init__(self,\n r_i: np.array,\n tau_ij,\n alpha_ij: np.ndarray,\n nu_i,\n delta_i,\n solver_verbose: int = 0,\n solver_method: str = 'dogbox'):\n self.kappa_ref = 0.034\n self.eps_ref = 1960.0\n\n self.r = r_i\n self.size = r_i.shape[0]\n self.temp_dep_tau = False\n if callable(tau_ij):\n self.temp_dep_tau = True\n self.tau_fun = tau_ij\n else:\n self.tau = tau_ij\n if tau_ij.shape != (self.size, self.size):\n raise Exception(f'tau_ij size {tau_ij.shape} is inconsistent with r_i size {self.size}')\n self.alpha = alpha_ij\n if alpha_ij.shape != (self.size, self.size):\n raise Exception(f'alpha_ij size {alpha_ij.shape} is inconsistent with r_i size {self.size}')\n self.nu = nu_i\n self.delta = delta_i\n if len(nu_i) != self.size or len(delta_i) != self.size:\n raise Exception(f'nu_i size {len(nu_i)} or delta_i size {len(delta_i)} '\n f'is inconsistent with r_i size {self.size}')\n\n self.delta_as, self.delta_ds = self.get_delta_arrays()\n self.delta_ad = np.zeros((len(self.delta_as), len(self.delta_ds)))\n self.solver_verbose = solver_verbose\n self.solver_method = solver_method", "def inertia_tensor_partial(self, part, masswt=True, zero=ZERO):\n tensor = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n for i in part:\n if masswt:\n # I(alpha, alpha)\n tensor[0][0] += self.mass(i) * (self.y(i) * self.y(i) + self.z(i) * self.z(i))\n tensor[1][1] += self.mass(i) * (self.x(i) * self.x(i) + self.z(i) * self.z(i))\n tensor[2][2] += self.mass(i) * (self.x(i) * self.x(i) + self.y(i) * self.y(i))\n\n # I(alpha, beta)\n tensor[0][1] -= self.mass(i) * self.x(i) * self.y(i)\n tensor[0][2] -= self.mass(i) * self.x(i) * self.z(i)\n tensor[1][2] -= self.mass(i) * self.y(i) * self.z(i)\n\n else:\n # I(alpha, alpha)\n tensor[0][0] += self.y(i) * self.y(i) + self.z(i) * self.z(i)\n tensor[1][1] += self.x(i) * self.x(i) + self.z(i) * self.z(i)\n tensor[2][2] += self.x(i) * self.x(i) + self.y(i) * self.y(i)\n\n # I(alpha, beta)\n tensor[0][1] -= self.x(i) * self.y(i)\n tensor[0][2] -= self.x(i) * self.z(i)\n tensor[1][2] -= self.y(i) * self.z(i)\n\n # mirror\n tensor[1][0] = tensor[0][1]\n tensor[2][0] = tensor[0][2]\n tensor[2][1] = tensor[1][2]\n\n # Check the elements for zero and make them a hard zero.\n for i in range(3):\n for j in range(3):\n if math.fabs(tensor[i][j]) < zero:\n tensor[i][j] = 0.0\n return tensor", "def __init__(self, f, N_elements, C, D, analytical, grid_points):\n\n\n\n self.Ne = N_elements\n self.gp = grid_points\n self.C = C\n self.D = D\n self.f = lambda x: f(x)\n self.tol = 10e-4\n self.x = sym.Symbol(\"x\")\n\n self.h = 1/(2*self.Ne)\n self.global_matrix = np.zeros([2*self.Ne, 2*self.Ne])\n self.global_vector = np.zeros([2*self.Ne])\n self.psi = sym.zeros(3*self.Ne,1)\n\n self.analytical = lambda x,C,D: 
analytical(x,C,D)\n\n self.x_values = np.linspace(0,1,self.gp)", "def R_term(\n enst, # enstrophy field\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33, # strain rate-33 component\n diff = False): # differentiation flag\n #---------------------------------------------------------------------#\n # Defining domain variables #\n #---------------------------------------------------------------------#\n pi = np.pi # pi\n dx = (2.0*pi)/64.0 # spatial step\n nu = 0.000185 # default viscosity\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n dim = 64\n kspec = np.fft.fftfreq(dim) * dim\n Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n term1 = np.zeros((dim, dim, dim))\n term2 = np.zeros((dim, dim, dim))\n term3 = np.zeros((dim, dim, dim))\n #---------------------------------------------------------------------#\n # Numerator (numpy gradient tool) #\n #---------------------------------------------------------------------#\n if diff is not False:\n term1 = np.gradient(enst,dx, edge_order=2)[0]\n term2 = np.gradient(enst,dx, edge_order=2)[1]\n term3 = np.gradient(enst,dx, edge_order=2)[2]\n #---------------------------------------------------------------------#\n # Numerator (spectral differentiation) #\n #---------------------------------------------------------------------#\n else:\n term1 = 0.5*np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(enst) +\\\n 1j*Kfield[0]*np.fft.fftn(enst)).real\n term2 = 0.5*np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(enst) +\\\n 1j*Kfield[1]*np.fft.fftn(enst)).real\n term3 = 0.5*np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(enst) +\\\n 1j*Kfield[2]*np.fft.fftn(enst)).real\n #---------------------------------------------------------------------#\n # Numerator #\n #---------------------------------------------------------------------#\n num = nu*(term1**2.0+ term2**2.0 + term3**2.0)\n #---------------------------------------------------------------------#\n # Denominator #\n #---------------------------------------------------------------------#\n den = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n #---------------------------------------------------------------------#\n # R calculation #\n #---------------------------------------------------------------------#\n R = num/den\n\n return R", "def IR():\n s = np.array(\n [2.40774137,2.287696084,2.203613927,2.048710132,1.899829585,1.591776247,\n 2.021218754,2.572949552,3.298381484,3.635993426,3.788266224,3.8307278,3.834208811]\n )\n\n TI = np.array([50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 1500, 2000, 3000])\n\n comp1 = s * np.array([-159.1,-134.2,-109.1,-64.7,25.0,40.1,88.6,126.8,187.6,219.4,245.4,253.6,256.1])\n comp2 = s * np.array([-368.3,-356.9,-343.8,-318.1,-292.0,-242.5,-199.3,-158.4,-68.8,14.2,131.9,219.5,333.5])\n comp3 = s * np.array([-77.5,-51.9,-29.8,9.9,40.2,85.7,115.4,135.1,160.1,167.6,172.3,171.7,171.8])\n comp4 = s 
* np.array([-265.0,-240.6,-216.7,-170.5,-128.2,-53.5,9.6,62.3,159.7,223.8,296.5,328.3,346.7])\n comp5 = s * np.array([-346.5,-328.9,-312.1,-278.5,-244.4,-182.3,-128.0,-80.0,30.8,109.3,225.1,299.5,372.2])\n\n comp = [comp1, comp2, comp3, comp4, comp5]\n MSE = []\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n x_new = np.linspace(0, 3000, 10000)\n for i, j, k in zip(comp, colors, np.arange(1, 6)):\n plt.scatter(TI, i, c=j)\n # popt, _ = curve_fit(MZ, TI, i, p0=np.array([200, 220, 300]))\n popt, _ = curve_fit(MZ, TI, i, p0=np.array([300, 220]))\n # M_z0, T1, M0 = popt\n M0, T1 = popt\n y_new = MZ(x_new, *popt)\n plt.plot(x_new, y_new, \"--\", c=j, label=f\"Fit Comp. {k:d} : $T_1$={T1:3.2f}\")\n MSE.append(mean_squared_error(i,y_new[TI]))\n print(MSE)\n print(np.mean(MSE))\n plt.grid()\n plt.legend(loc=\"best\")\n plt.xlabel(\"TI\")\n plt.ylabel(r\"Singal Intensity $M_z$\")\n plt.show()", "def global_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D_global(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU_global(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG", "def refugia_adj_5_full_2_iter1 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = 
[nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def __init__(\n self,\n grid,\n mannings_n=0.03,\n critical_flow_depth=0.003,\n mannings_epsilon=0.33333333,\n dt_max=0.3,\n max_courant=0.2,\n min_surface_water_depth=1.0e-8,\n ):\n super().__init__(grid)\n\n if not isinstance(self.grid, RasterModelGrid):\n ValueError(\"KinematicWaveRengers: grid must be regular\")\n\n if np.isclose(dt_max, 0.0):\n raise ValueError(\"KinematicWaveRengers: dt must be > 0.0\")\n\n active = np.nonzero(self.grid.status_at_node != self.grid.BC_NODE_IS_CLOSED)\n self._h = self.grid.at_node[\"surface_water__depth\"]\n self._active = active\n self._hc = critical_flow_depth\n self._n = mannings_n\n self._negepsilon = -mannings_epsilon\n\n self._dt_max = dt_max\n self._min_surface_water_depth = min_surface_water_depth\n self._active_depths = self.grid.at_node[\"surface_water__depth\"][active]\n all_grads = self.grid.calc_grad_at_link(\"topographic__elevation\")\n hoz_grads = self.grid.map_mean_of_horizontal_active_links_to_node(all_grads)\n vert_grads = self.grid.map_mean_of_vertical_active_links_to_node(all_grads)\n self._hozslopept5 = np.fabs(hoz_grads[active]) ** 0.5\n self._vertslopept5 = np.fabs(vert_grads[active]) ** 0.5\n self._velx = self.grid.zeros(\"node\", dtype=float)\n self._vely = self.grid.zeros(\"node\", dtype=float)\n self._qy = np.zeros(grid.number_of_nodes + 1, dtype=float)\n self._qx = np.zeros(grid.number_of_nodes + 1, dtype=float)\n self._poshozgrads = hoz_grads > 0.0\n self._posvertgrads = vert_grads > 0.0\n if np.isclose(self.grid.dx, self.grid.dy):\n self._equaldims = True\n self._courant_prefactor = max_courant * self.grid.dx\n else:\n self._equaldims = False\n self._courant_prefactor = max_courant * self.grid.dx * self.grid.dy\n self._neighbors = self.grid.adjacent_nodes_at_node.copy()\n self._neighbors[self._neighbors == self.grid.BAD_INDEX] = -1\n self._water_balance = []\n self._actives_BCs = (\n self.grid.status_at_node[active] == self.grid.BC_NODE_IS_FIXED_VALUE\n )\n self._actives_BCs_water_depth = self._h[active][self._actives_BCs]\n fixed_grad_nodes = self.grid.fixed_gradient_boundary_nodes.copy()\n fixed_grad_anchors = self.grid.fixed_gradient_boundary_node_anchor_node\n\n # ^add this value to the anchor nodes to update the BCs\n # these also need to be mapped to active_IDs:\n blank_nodes = self.grid.zeros(\"node\", dtype=bool)\n blank_nodes[fixed_grad_nodes] = True\n self._fixed_grad_nodes_active = np.where(blank_nodes[active])[0]\n blank_nodes.fill(False)\n blank_nodes[fixed_grad_anchors] = True\n self._fixed_grad_anchors_active = np.where(blank_nodes[active])[0]\n\n # create outputs\n self.initialize_output_fields()", "def __init__(self, u1=(1,0,0), u2=(0,1,0), u3=(0,0,1)):\n u1 = vec3(u1)\n u2 = vec3(u2)\n u3 = vec3(u3)\n\n if triple_scalar_product(u1, u2, u3) != 1:\n print >> sys.stderr, \"!! 
Invalid lattice vectors: u1 = %s, u2 = %s, u3 = %s\" % (u1,u2,u3)\n self.e1 = vec3(1,0,0)\n self.e2 = vec3(0,1,0)\n self.e3 = vec3(0,0,1)\n else:\n s1 = square(u1)\n s2 = square(u2)\n d12 = dot(u1, u2)\n d23 = dot(u2, u3)\n d13 = dot(u1, u3)\n alpha = -d12/s1\n gamma = -(alpha*d13 + d23)/(alpha*d12 + s2)\n beta = -(d13 + gamma*d12)/s1\n self.e1 = u1\n self.e2 = u2 + alpha*u1\n self.e3 = u3 + beta*u1 + gamma*u2\n\n if verbose:\n print(\"e1 = %s\" % self.e1)\n print(\"e2 = %s\" % self.e2)\n print(\"e3 = %s\" % self.e3)\n\n self.L1 = length(self.e1)\n self.L2 = length(self.e2)\n self.L3 = length(self.e3)\n self.n1 = self.e1/self.L1\n self.n2 = self.e2/self.L2\n self.n3 = self.e3/self.L3\n self.cells = []\n\n v0 = vec3(0,0,0)\n self.v = [v0,\n v0 + self.e3,\n v0 + self.e2,\n v0 + self.e2 + self.e3,\n v0 + self.e1,\n v0 + self.e1 + self.e3,\n v0 + self.e1 + self.e2,\n v0 + self.e1 + self.e2 + self.e3]\n\n # Compute bounding box of cuboid\n xs = [vk.x for vk in self.v]\n ys = [vk.y for vk in self.v]\n zs = [vk.z for vk in self.v]\n vmin = vec3(min(xs), min(ys), min(zs))\n vmax = vec3(max(xs), max(ys), max(zs))\n\n # Extend to nearest integer coordinates\n ixmin = int(floor(vmin.x))\n ixmax = int(ceil(vmax.x))\n iymin = int(floor(vmin.y))\n iymax = int(ceil(vmax.y))\n izmin = int(floor(vmin.z))\n izmax = int(ceil(vmax.z))\n if verbose:\n print(\"ixmin, ixmax = %d, %d\" % (ixmin,ixmax))\n print(\"iymin, iymax = %d, %d\" % (iymin,iymax))\n print(\"izmin, izmax = %d, %d\" % (izmin,izmax))\n\n # Determine which cells (and which faces within those cells) are non-trivial\n for ix in range(ixmin, ixmax):\n for iy in range(iymin, iymax):\n for iz in range(izmin, izmax):\n shift = vec3(-ix, -iy, -iz)\n faces = [Plane(self.v[0] + shift, +self.n1),\n Plane(self.v[4] + shift, -self.n1),\n Plane(self.v[0] + shift, +self.n2),\n Plane(self.v[2] + shift, -self.n2),\n Plane(self.v[0] + shift, +self.n3),\n Plane(self.v[1] + shift, -self.n3)]\n\n c = Cell(ix, iy, iz)\n skipcell = False\n for f in faces:\n r = UnitCubeTest(f)\n if r == +1:\n # Unit cube is completely above this plane; this cell is empty\n continue\n elif r == 0:\n # Unit cube intersects this plane; keep track of it\n c.faces.append(f)\n elif r == -1:\n skipcell = True\n break\n\n if skipcell or len(c.faces) == 0:\n if verbose:\n print(\"Skipping cell at (%d,%d,%d)\" % (ix,iy,iz))\n continue\n else:\n self.cells.append(c)\n if verbose:\n print(\"Adding cell at (%d,%d,%d)\" % (ix,iy,iz))\n\n # For the identity remapping, use exactly one cell\n if len(self.cells) == 0:\n self.cells.append(Cell())\n\n # Print the full list of cells\n if verbose:\n print(\"%d non-empty cells\" % len(self.cells))\n for c in self.cells:\n print(\"Cell at (%d,%d,%d) has %d non-trivial planes\" % (c.ix,\n c.iy, c.iz, len(c.faces)))", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0, 0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = 
t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda tt: np.interp(tt, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def __init__(self,files_atte,files_emis,states,lifetime):\n self.files_atte = files_atte #!\n self.files_emis = files_emis #!\n self.beam_emis = [] #!\n self.beam_atte = [] #!\n print('The wavelength assumes an hydrogen atom')\n self.n_low = states[0] #!\n self.n_high = states[1] #!\n self.E0 = -13.6\n self.lifetime = lifetime\n self.read_adas()\n\n # compute the interpolant\n self.atte_tck_dens = [] #!\n self.emis_tck_dens = [] #!\n self.atte_tck_temp = [] #!\n self.emis_tck_temp = [] #!\n for i in range(len(self.beam_atte)):\n # get data\n ldensities = np.log(self.get_list_density('atte',i))\n lbeams = np.log(self.get_list_beams('atte',i))\n coef_dens = self.get_coef_density('atte',i)\n lbeams, ldens = np.meshgrid(lbeams, ldensities)\n \n # interpolation over beam and density\n self.atte_tck_dens.append(interpolate.bisplrep(\n lbeams,ldens,coef_dens,kx=1,ky=1))\n \n # get data for the interpolation in temperature\n T = np.log(self.get_list_temperature('atte',i))\n coef_T = self.get_coef_T('atte',i)\n Tref = np.log(self.get_Tref('atte',i))\n index = abs((Tref-T)/Tref) < 1e-4\n \n #interpolation over the temperature\n self.atte_tck_temp.append(interpolate.splrep(\n T,coef_T/coef_T[index],k=1))\n\n for i in range(len(self.beam_emis)):\n # get data\n ldensities = np.log(self.get_list_density('emis',i))\n lbeams = np.log(self.get_list_beams('emis',i))\n coef_dens = self.get_coef_density('emis',i)\n lbeams, ldens = np.meshgrid(lbeams, ldensities)\n \n # interpolation over beam and density\n self.emis_tck_dens.append(interpolate.bisplrep(\n lbeams,ldens,coef_dens,kx=1,ky=1))\n\n # Get data for the interpolation in temperature\n T = np.log(self.get_list_temperature('emis',i))\n coef_T = self.get_coef_T('emis',i)\n Tref = np.log(self.get_Tref('emis',i))\n index = abs((Tref-T)/Tref) < 1e-4\n \n #interpolation over the temperature\n self.emis_tck_temp.append(interpolate.splrep(\n T,coef_T/coef_T[index],k=1))", "def define_ising_helper_functions():\n\n @njit(cache=True)\n def fast_sum(J, s):\n \"\"\"Helper function for calculating energy in calc_e(). 
Iterates couplings J.\"\"\"\n e = np.zeros(s.shape[0])\n for n in range(s.shape[0]):\n k = 0\n for i in range(s.shape[1]-1):\n for j in range(i+1,s.shape[1]):\n e[n] += J[k]*s[n,i]*s[n,j]\n k += 1\n return e\n\n @njit(\"float64[:](int64[:,:],float64[:])\")\n def calc_e(s, params):\n \"\"\"\n Parameters\n ----------\n s : 2D ndarray of ints\n state either {0,1} or {+/-1}\n params : ndarray\n (h, J) vector\n\n Returns\n -------\n E : ndarray\n Energies of all given states.\n \"\"\"\n \n e = -fast_sum(params[s.shape[1]:],s)\n e -= np.sum(s*params[:s.shape[1]],1)\n return e\n \n def mch_approximation(samples, dlamda):\n \"\"\"Function for making MCH approximation step for Ising model.\"\"\"\n dE = calc_e(samples, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = pair_corr(samples, weights=np.exp(-dE)/len(dE), concat=True) * ZFraction \n assert not (np.any(predsisj < -1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n \n @njit(cache=True)\n def calc_observables(samples):\n \"\"\"Observables for Ising model.\"\"\"\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0], n+n*(n-1)//2))\n \n k = 0\n for i in range(n):\n obs[:,i] = samples[:,i]\n for j in range(i+1,n):\n obs[:,n+k] = samples[:,i] * samples[:,j]\n k += 1\n return obs\n return calc_e, calc_observables, mch_approximation", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0,0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda t: np.interp(t, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! 
\n    \"\"\"In the following, +[[]] and [:-1] are added to keep things a 1-dim array of objects and still multiply them element-wise\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it a one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n    #Use correct broadcasting?\n    w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it a one-dim array of objects \n    b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n    u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n    return u_next
def E_l(x_r, x_r_prime, w_n, w_n_1, I_n, I_n_1):\n temp = (1. / (2. 
* SIGMA_E ** 2.)) * \\\n w_n[x_r] * \\\n w_n_1[x_r_prime] * \\\n ((I_n[x_r] - I_n_1[x_r_prime]) ** 2.)\n \n return temp", "def local_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG", "def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))", "def ilike(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, N):\r\n\tdetD, gfunc, U = np.zeros(N), np.zeros(N), np.zeros(N)\r\n\tfor i in range(N):\r\n\t\tif np.isfinite(vrad_obs[i]):\r\n\t\t\tdetD[i], gfunc[i] = _like4(init_par, alpha[i], delta[i], plx_obs[i],\r\n\t\t\t\t\t\t mualpha_obs[i], mudelta_obs[i], vrad_obs[i], sigma_obs[i,:], sigma_vrad[i], ccoef[i, :], i)\r\n\t\t\tU[i] = np.log(detD[i]) + gfunc[i] + 4.*np.log(2.*np.pi)\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tdetD[i], gfunc[i] = _like3(init_par, alpha[i], delta[i], plx_obs[i],\r\n\t\t\t\t\t\t mualpha_obs[i], mudelta_obs[i], sigma_obs[i,:], ccoef[i, :], i)\t\r\n\t\t\tU[i] = np.log(detD[i]) + gfunc[i] + 3.*np.log(2.*np.pi)\r\n\r\n\tL = np.sum(U)\r\n\treturn L, gfunc", "def meanAdjust(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n postchis = []\n prechis = []\n model_complete = []\n meas_complete = []\n Bvec_complete = []\n Sol_complete = []\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) 
)\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n\n if numd < 2:\n continue\n\n Neq = np.eye(numZD,dtype=float) * 0.001\n Apart = np.zeros((numd,numZD))\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = 1.\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n \n prechis.append(np.sqrt(prechi/numd))\n postchis.append(np.sqrt(postchi/numd))\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd))\n model = np.dot(Apart,Sol)\n\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #overallPrechi = np.dot(data[:,3].T,data[:,3])\n numd = np.size(meas_complete)\n #print(\"OVERALL STATS:\", np.mean(prechis),np.mean(postchis),np.sqrt(overallPrechi/numD))\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n f = loglikelihood(meas_complete,model_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n stats['bic'] = bic\n\n return pwl_All, pwlSig_All,stats", "def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids * 2] = self.fluid_func()\n k += self.num_nw_fluids * 2\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k:k + 2] = self.mass_flow_func()\n k += 2\n\n ######################################################################\n # equations for energy balance\n self.residual[k] = self.energy_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer\n if self.Q.is_set:\n self.residual[k] = (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val)\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient\n if self.kA.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient characteristic\n if self.kA_char.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_char_func()\n k += 1\n\n ######################################################################\n # equations for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n self.residual[k] = self.ttd_u_func()\n k += 1\n\n 
######################################################################\n # equations for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n self.residual[k] = self.ttd_l_func()\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.residual[k] = (\n self.pr1.val * self.inl[0].p.val_SI - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.residual[k] = (\n self.pr2.val * self.inl[1].p.val_SI - self.outl[1].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta at hot side\n if self.zeta1.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # equations for specified zeta at cold side\n if self.zeta2.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)", "def process_rixs(save_folder,states_index,temperature,absorption_pol,emission_pol,\n final_state_lifetime, intermediate_state_lifetime,\n ein_min = 1E10, ein_max = -1E10, ein_step = 0.1,\n eloss_min = 1E10, eloss_max = -1E10, eloss_step = 0.1,\n verbose = False):\n\n\n states = get_ground_states(save_folder,states_index,prefix='abs_')\n states['boltz'] = boltz_dist(states['energy'],temperature,verbose=verbose)\n\n absorption = collect_transition_matrix(save_folder,states['label'],absorption_pol,\n states_energy = states['energy'], prefix='abs_')\n\n absorption = cleanup_transition_matrix(absorption,absorption_pol)\n\n emission_label = [label.split()[0] for label in absorption['label']]\n emission = collect_transition_matrix(save_folder,emission_label,emission_pol,\n prefix='emi_')\n\n\n # Apply Bolzmann factor\n for i in range(len(absorption['matrix'])):\n for j in range(absorption['matrix'][i].shape[1]):\n for energy,state,boltz in zip(states['energy'],states['label'],states['boltz']):\n if absorption['label'][i].split()[-1] == state:\n if np.array(absorption['ket_energy'][i][j]-energy) < 0.0002:\n absorption['matrix'][i][:,j] *= boltz\n\n # Construct rixs matrix\n rixs_matrix = OrderedDict({})\n rixs_matrix['label'] = []\n rixs_matrix['energy_inc'] = []\n rixs_matrix['energy_loss'] = []\n rixs_matrix['matrix'] = []\n\n for i in range(len(absorption['label'])):\n for j in range(len(emission['label'])):\n if absorption['label'][i].split()[0] == emission['label'][j].split()[-1]:\n for k in range(len(absorption['ket_energy'][i])):\n emi, inc = np.meshgrid([np.abs(x - states['gs0']) for x in emission['bra_energy'][j]], absorption['bra_energy'][i]- absorption['ket_energy'][i][k])\n rixs_matrix['energy_loss'].append(emi)\n rixs_matrix['energy_inc'].append(inc)\n #rixs_matrix['label'].append(absorption['label'][i] + ' {:s} {:s}'.format(emission['label'][j].split()[1],emission['label'][j].split()[2]))\n rixs_matrix['label'].append(emission['label'][j] + ' {:s} {:s}'.format(absorption['label'][i].split()[1],absorption['label'][i].split()[2]))\n 
rixs_matrix['matrix'].append(np.zeros((len(rixs_matrix['energy_inc'][-1]), len(rixs_matrix['energy_loss'][-1][0,:])), dtype = complex))\n for l in range(len(rixs_matrix['matrix'][-1])):\n rixs_matrix['matrix'][-1][l,:] = [x*absorption['matrix'][i][l,k] for x in emission['matrix'][j][:,l]]\n\n # Finds energy limits\n if ein_max == -1E10:\n for line in absorption['bra_energy']:\n if ein_max < np.max(line):\n ein_max = np.max(line)\n ein_max += 2.0\n if ein_min == 1E10:\n for line in absorption['bra_energy']:\n if ein_min > np.min(line):\n ein_min = np.min(line)\n ein_min -= 2.0\n\n if eloss_max == -1E10:\n for line in emission['bra_energy']:\n if eloss_max < (np.max(line) - states['gs0']):\n eloss_max = np.max(line) - states['gs0']\n eloss_max += 1.0\n if eloss_min == 1E10:\n for line in emission['bra_energy']:\n if eloss_min > (np.min(line) - states['gs0']):\n eloss_min = np.min(line) - states['gs0']\n eloss_min -= 1.0\n\n energy_loss = np.linspace(eloss_min , eloss_max, int((eloss_max - eloss_min)/eloss_step + 0.5))\n energy_inc = np.linspace(ein_min, ein_max, int((ein_max - ein_min)/ein_step + 0.5))\n\n if verbose is True:\n time0 = datetime.now()\n\n if verbose is True:\n print('\\nStarting RIXS calculation...')\n print('Transitions used:')\n\n for l in rixs_matrix['label']:\n print(l)\n\n print('\\nBuilding RIXS matrix...')\n\n #Finding transitions that will interfere\n rixs_interference = []\n rixs_interference_label = []\n for i in range(len(rixs_matrix['label'])):\n\n init = rixs_matrix['label'][i].split()[0]\n final = rixs_matrix['label'][i].split()[-1]\n\n if len(rixs_interference) == 0:\n rixs_interference_label.append('{:s},{:s}'.format(init,final))\n rixs_interference.append([i])\n else:\n\n if '{:s},{:s}'.format(init,final) in rixs_interference_label:\n rixs_interference[rixs_interference_label.index('{:s},{:s}'.format(init,final))].append(i)\n else:\n rixs_interference_label.append('{:s},{:s}'.format(init,final))\n rixs_interference.append([i])\n\n if verbose is True:\n print('Interference pairs')\n for i,j in zip(rixs_interference_label, rixs_interference):\n print(i,j)\n\n print('')\n\n rixs = pd.DataFrame(np.zeros((len(energy_loss),len(energy_inc))),index=energy_loss,columns=energy_inc)\n\n for eloss in energy_loss:\n if verbose is True:\n if np.abs(eloss - int(eloss+0.5)) < eloss_step/2:\n print('Eloss = {:0.2f} eV... 
'.format(eloss))\n for ein in energy_inc:\n aux = np.array([])\n for l in range(len(rixs_interference)):\n M = []\n for k in rixs_interference[l]:\n M.append(np.sum(rixs_matrix['matrix'][k]/(ein-rixs_matrix['energy_inc'][k] + intermediate_state_lifetime*1j),axis=0))\n aux = np.append(aux, np.abs(np.sum(M,axis=0))**2*final_state_lifetime/2/np.pi/((eloss-rixs_matrix['energy_loss'][rixs_interference[l][0]][0,:])**2 + final_state_lifetime**2/4))\n rixs[ein][eloss] = np.sum(aux)\n\n if verbose is True:\n print('Done!')\n\n if verbose is True:\n timef = datetime.now()\n print('Time to create rixs matrixes: ', timef-time0)\n\n return rixs", "def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")" ]
[ "0.6120228", "0.5804139", "0.5638813", "0.5638267", "0.5628902", "0.5628902", "0.5626579", "0.5618351", "0.5618351", "0.5618351", "0.56004834", "0.5596577", "0.5594398", "0.55738205", "0.55608195", "0.5557147", "0.55332184", "0.5526329", "0.5496661", "0.54944676", "0.54727423", "0.54061705", "0.53927857", "0.5366567", "0.5349418", "0.53488505", "0.5338569", "0.5337384", "0.5327658", "0.5317227", "0.5316546", "0.5313045", "0.5308712", "0.52977616", "0.5269674", "0.52601415", "0.5247107", "0.5247107", "0.52439743", "0.5224979", "0.521173", "0.5201773", "0.52002937", "0.52000576", "0.5198227", "0.51844937", "0.51778233", "0.5173936", "0.51604193", "0.5157195", "0.51466525", "0.5135085", "0.51340824", "0.51259774", "0.51008207", "0.5100551", "0.5094845", "0.5087958", "0.5087332", "0.50864667", "0.5085104", "0.5081249", "0.50798285", "0.50750965", "0.50718164", "0.50701547", "0.5065208", "0.5064312", "0.506321", "0.5056933", "0.50527394", "0.5046125", "0.50444996", "0.5044022", "0.5040449", "0.50348234", "0.5033282", "0.50313735", "0.5029845", "0.5028378", "0.5027821", "0.5027204", "0.5022421", "0.5021385", "0.50090396", "0.5006913", "0.5005731", "0.50001574", "0.49933738", "0.49903157", "0.49858493", "0.49839693", "0.49800307", "0.49775848", "0.49710718", "0.49691704", "0.49685797", "0.4967903", "0.4963734", "0.4963705", "0.4963424" ]
0.0
-1
Extract a zipfile to a directory if the password is correct.
import os
import zipfile


def extractfile(file, passwd):
    try:
        zipf = zipfile.ZipFile(file)
        # Extract next to the archive, stripping the ".zip" suffix from its name.
        zipf.extractall(path=os.path.join(file[:-4]), pwd=str.encode(passwd))
        print('Password: {}'.format(passwd))
    except Exception:
        # Wrong password or unreadable archive: stay silent so the caller
        # can keep trying other candidate passwords.
        pass
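A minimal usage sketch (an illustration, not part of the dataset record): driving the extractfile-style logic over candidate passwords read from a wordlist until one opens the archive. The helper name `find_password` and the default `passwords.txt` path are assumptions for the example.

import zipfile


def find_password(archive, wordlist='passwords.txt'):
    # Hypothetical helper: try each candidate from a plain-text wordlist
    # (one password per line) until the archive extracts cleanly.
    with open(wordlist, 'r', errors='ignore') as fh:
        for line in fh:
            passwd = line.strip()
            try:
                with zipfile.ZipFile(archive) as zipf:
                    zipf.extractall(path=archive[:-4], pwd=passwd.encode())
                return passwd  # extraction succeeded, password found
            except (RuntimeError, zipfile.BadZipFile):
                continue  # wrong password, try the next candidate
    return None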
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unzipArchives(zip_file, password):\n with ZipFile(zip_file) as archive:\n archive.extractall(pwd=bytes(password, \"utf8\"))", "def unzip_item(source_path, destination_path, password):\n\n if not destination_path:\n destination_path = source_path.replace(\".zip\", \"\")\n if not os.path.isdir(destination_path):\n os.makedirs(destination_path)\n else:\n destination_path += \"_unzipped\"\n if not os.path.isdir(destination_path):\n os.makedirs(destination_path)\n\n try:\n with pyzipper.AESZipFile(source_path) as z:\n members = z.infolist()\n for i, member in enumerate(members):\n z.extract(member, destination_path, pwd=password)\n print(f\"Unpacked {member.filename} from archive.\")\n print(f\"{source_path} unpacked successfully to {destination_path}.\")\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)", "def unzipfile(filename, passcode):\n # Password is SHA-256 hash of the pass code received\n password = hashlib.sha256(passcode.encode('utf-8')).hexdigest()\n # Unzip with password\n with ZipFile(filename) as zf:\n zf.extractall(pwd=bytes(password, 'utf-8'))", "def extract_zip_contents(zip_file, destination):\n logging.info(\"Extracting ZIP File\")\n if os.path.isfile(zip_file):\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(destination)\n else:\n logging.error(\"%s not found.\", zip_file)\n sys.exit(\"ZIP is not the filesystem.\")", "def extract_zip(zip_path, target_folder):\n with zipfile.ZipFile(zip_path) as archive:\n archive.extractall(target_folder)", "def extract_and_clean(zipper, zip_path, filename):\n zipper.extract(zip_path)\n if \"/\" in zip_path :\n os.rename(zip_path, filename)\n shutil.rmtree(zip_path.split('/')[0])", "def extract_zip(file, extract_location):\n\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(extract_location)\n\n print(f\"Extracted file to {extract_location}\")", "def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))", "def _extract_file(dest_path, root_dir):\n logger.info(\"Unzipping the dataset file.\")\n with zipfile.ZipFile(dest_path, \"r\") as zip_dir:\n zip_dir.extractall(root_dir)", "def unzip_file(path_to_zip_file, directory_to_extract_to):\n \n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n return", "def SshExtractZip(host, zipname, dst):\n command = ['ssh', host, 'unzip', '-o', '-d', dst, zipname]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh unzip -o -d \"%s\" \"%s\" on \"%s\" (%s)' %\n (dst, zipname, host, result))\n\n # unzip will create directories with access 700, which is not often what we\n # need. 
Fix the permissions for the whole archive.\n command = ['ssh', host, 'chmod', '-R', '755', dst]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh chmod -R 755 \"%s\" on \"%s\" (%s)' %\n (dst, host, result))", "def __extract_zip(self):\n archive_binaries_dir = None\n zip_file = zipfile.ZipFile(self.archive)\n try:\n extract_dir = tempfile.mkdtemp()\n archive_binaries_dir = self.__create_extraction_dir(\n zip_file.namelist(), extract_dir, zip_file.extract)\n finally:\n zip_file.close()\n return archive_binaries_dir, extract_dir", "def unzip_file(zip_path, directory_to_extract_to):\n ensure_dir(directory_to_extract_to)\n with zipfile.ZipFile(file=zip_path) as zip_file:\n # Loop over each file\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n try:\n zip_file.extract(member=file, path=directory_to_extract_to)\n except BadZipFile as e:\n print(e)", "def zip_folder(source_path, destination_path, password):\n\n source_path = os.path.abspath(source_path)\n\n if not destination_path:\n destination_path = source_path + \".zip\"\n\n if not destination_path.endswith(\".zip\"):\n destination_path += \".zip\"\n\n try:\n parent_folder = os.path.dirname(source_path)\n contents = os.walk(source_path)\n\n if password:\n z = pyzipper.AESZipFile(destination_path + \"\\\\\", 'w', compression=pyzipper.ZIP_LZMA, encryption=pyzipper.WZ_AES)\n z.setpassword(password)\n else:\n z = pyzipper.ZipFile(destination_path + \"\\\\\", 'w', compression=pyzipper.ZIP_LZMA)\n\n try:\n for root, folders, files in contents:\n # Include all subfolders, including empty ones.\n for folder_name in folders:\n absolute_path = os.path.join(root, folder_name)\n relative_path = absolute_path.replace(parent_folder + '\\\\', '')\n print(f\"Adding {absolute_path} to archive.\")\n z.write(absolute_path, relative_path)\n for file_name in files:\n absolute_path = os.path.join(root, file_name)\n relative_path = absolute_path.replace(parent_folder + '\\\\', '')\n print(f\"Adding {absolute_path} to archive.\")\n z.write(absolute_path, relative_path)\n print(f\"{destination_path} created successfully.\")\n\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)\n\n finally:\n z.close()\n\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)", "def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination", "def fromZip(self, zip_location,extract_location):\n zip_file = zipfile.ZipFile(zip_location,'r')\n zip_file.extractall(extract_location)", "def ExtractZip(zip_path, dest_dir):\n zip_path = GetWindowsPathWithUNCPrefix(zip_path)\n dest_dir = GetWindowsPathWithUNCPrefix(dest_dir)\n with zipfile.ZipFile(zip_path) as zf:\n for info in zf.infolist():\n zf.extract(info, dest_dir)\n # UNC-prefixed paths must be absolute/normalized. 
See\n # https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file#maximum-path-length-limitation\n file_path = os.path.abspath(os.path.join(dest_dir, info.filename))\n # The Unix st_mode bits (see \"man 7 inode\") are stored in the upper 16\n # bits of external_attr. Of those, we set the lower 12 bits, which are the\n # file mode bits (since the file type bits can't be set by chmod anyway).\n attrs = info.external_attr >> 16\n if attrs != 0: # Rumor has it these can be 0 for zips created on Windows.\n os.chmod(file_path, attrs & 0o7777)", "def _unzip_file(zip_file_path: str, unzip_dir: str = \"\") -> None:\n if not unzip_dir:\n unzip_dir = os.path.dirname(zip_file_path)\n op_desc = f\"Extracting: {os.path.basename(zip_file_path)}\"\n try:\n with ZipFile(file=zip_file_path) as zip_file:\n for member_name in tqdm(zip_file.namelist(), desc=op_desc):\n file_name = os.path.basename(member_name)\n if not file_name:\n continue\n target_path = os.path.join(unzip_dir, file_name)\n target_path = open(target_path, \"wb\")\n source_file = zip_file.open(member_name)\n with source_file, target_path:\n shutil.copyfileobj(source_file, target_path)\n os.remove(zip_file_path)\n except Exception as zip_error:\n zip_file_str = os.path.basename(zip_file_path)\n zip_file_str = os.path.splitext(zip_file_str)[0]\n for file_name in os.listdir(unzip_dir):\n if zip_file_str in file_name:\n os.remove(os.path.join(unzip_dir, file_name))\n raise zip_error", "def ExtractZip(filename, output_dir, verbose=True):\n MaybeMakeDirectory(output_dir)\n\n # On Linux and Mac, we use the unzip command as it will\n # handle links and file bits (executable), which is much\n # easier then trying to do that with ZipInfo options.\n #\n # On Windows, try to use 7z if it is installed, otherwise fall back to python\n # zip module and pray we don't have files larger than 512MB to unzip.\n unzip_cmd = None\n if IsLinux():\n unzip_cmd = ['unzip', '-o']\n elif IsMac():\n # The Mac version of unzip does not have LARGE_FILE_SUPPORT until\n # macOS 10.12, so use ditto instead. The Python ZipFile fallback\n # used on Windows does not support symbolic links, which makes it\n # unsuitable for Mac builds.\n unzip_cmd = ['ditto', '-x', '-k']\n elif IsWindows() and os.path.exists('C:\\\\Program Files\\\\7-Zip\\\\7z.exe'):\n unzip_cmd = ['C:\\\\Program Files\\\\7-Zip\\\\7z.exe', 'x', '-y']\n\n if unzip_cmd:\n # Make sure path is absolute before changing directories.\n filepath = os.path.abspath(filename)\n saved_dir = os.getcwd()\n os.chdir(output_dir)\n command = unzip_cmd + [filepath]\n # When using ditto, a destination is required.\n if command[0] == 'ditto':\n command += ['.']\n result = RunCommand(command)\n os.chdir(saved_dir)\n if result:\n raise ExternalError('unzip failed: %s => %s' % (str(command), result))\n else:\n assert IsWindows()\n zf = zipfile.ZipFile(filename)\n # TODO(hinoka): This can be multiprocessed.\n for name in zf.namelist():\n if verbose:\n print 'Extracting %s' % name\n zf.extract(name, output_dir)\n if IsMac():\n # Restore permission bits.\n os.chmod(os.path.join(output_dir, name),\n zf.getinfo(name).external_attr >> 16L)", "def extract(cls, path, outdir):\r\n with open_zip(path) as zip:\r\n for path in zip.namelist():\r\n # While we're at it, we also perform this safety test.\r\n if path.startswith('/') or path.startswith('..'):\r\n raise ValueError('Zip file contains unsafe path: %s' % path)\r\n # Ignore directories. 
extract() will create parent dirs as needed.\r\n if not path.endswith('/'):\r\n zip.extract(path, outdir)", "def unzip_to_temp_dir(zip_file_name):\n if not zip_file_name or not os.path.exists(zip_file_name):\n return None\n\n zf = zipfile.ZipFile(zip_file_name)\n\n if zf.testzip() is not None:\n return None\n\n # Unzip the files into a temporary directory\n LOGGER.info(\"Extracting zipped file: %s\" % zip_file_name)\n tempdir = tempfile.mkdtemp()\n\n try:\n # Create directories that don't exist\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if (name.endswith(os.path.sep) and not os.path.exists(dest)):\n os.mkdir(dest)\n LOGGER.debug(\"Directory %s created.\" % dest)\n\n # Copy files\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if not (name.endswith(os.path.sep)):\n LOGGER.debug(\"Copying file %s......\" % dest)\n outfile = open(dest, 'wb')\n outfile.write(zf.read(zip_name))\n outfile.close()\n LOGGER.debug(\"File %s copied.\" % dest)\n\n LOGGER.info(\"Unzipped file can be found at %s\" % tempdir)\n return tempdir\n\n except IOError as err:\n LOGGER.error(\"Error in extracting webdriver.xpi: %s\" % err)\n return None", "def unzip(input_filename, extract_dir):\n if not zipfile.is_zipfile(input_filename):\n raise ValueError(\"%s is not a zip file\" % (input_filename))\n zip_ds = zipfile.ZipFile(input_filename)\n zip_ds.extractall(path=extract_dir)\n zip_ds.close()", "def unzip(zfile, md=False):\n\tbasedir = ''\n\tcount = -1\n\tif md:\n\t\tbasedir = prepareBaseDir(zfile)\n\t\n\tzfile = zipfile.ZipFile(zfile, 'r')\n\tfor name in zfile.namelist():\n\t\tcount+=1\n\t\tuname = name.decode('gbk')\n\t\tif uname.endswith('.DS_Store'):\n\t\t\tcontinue\n\t\t\n\t\t#prepare directory\n\t\tdirs = os.path.dirname(uname)\n\t\tif basedir:\n\t\t\tdirs = os.path.join(basedir, dirs)\n\t\tprint 'Extracting: ' + uname\n\t\tif dirs and not os.path.exists(dirs):\n\t\t\tprint 'Prepare directories: ', dirs\n\t\t\tos.makedirs(dirs)\n\t\tif (count == 0):\n\t\t\thomeDir = uname[:-1]\n\t\t#ready to unzip file\n\t\tdata = zfile.read(name)\n\t\tif basedir:\n\t\t\tuname = os.path.join(basedir, uname)\n\t\tif not os.path.exists(uname):\n\t\t\tfo = open(uname, 'w')\n\t\t\tfo.write(data)\n\t\t\tfo.close()\n\tzfile.close()\n\treturn homeDir", "def getzip(url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if file_exists(done_file):\n print('{} already downloaded and extracted; skipping. 
To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n print('Downloading {} as {}.'.format(url, zipfile))\n urlretrieve(url, zipfile)\n print('Extracting {} into {}.'.format(zipfile, unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass", "def ZipExtract(zipname, filename, path=os.getcwd()):\n try:\n zpf = zipfile.ZipFile(zipname)\n zpf.extract(filename, path)\n zpf.close()\n return True\n except KeyError:\n logging.warning('Could not find %s to extract from %s.',\n (filename, zipname))\n return False", "def _extract_if_zip(tmpdir: str, config: CSCConfig) -> str:\n if os.path.isdir(config.reads):\n return config.reads\n else:\n extracted_dir = os.path.join(tmpdir, f\"{config.input_format}s\")\n os.makedirs(extracted_dir)\n with zipfile.ZipFile(config.reads) as zip_file:\n files = [finfo for finfo in zip_file.infolist() if finfo.filename.endswith(f\".{config.input_format}\")]\n for extract_file in files:\n zip_file.extract(extract_file, extracted_dir)\n return extracted_dir", "def unzip_file(data_zip, path_unzip):\r\n with zipfile.ZipFile(data_zip, \"r\") as zip_temp:\r\n zip_temp.extractall(path_unzip)", "def _do_unzip(zipped_file, output_directory):\n z = zipfile.ZipFile(zipped_file)\n for path in z.namelist():\n relative_path = os.path.join(output_directory, path)\n dirname, dummy = os.path.split(relative_path)\n try:\n if relative_path.endswith(os.sep) and not os.path.exists(dirname):\n os.makedirs(relative_path)\n elif not os.path.exists(relative_path):\n dirname = os.path.join(output_directory, os.path.dirname(path))\n if os.path.dirname(path) and not os.path.exists(dirname):\n os.makedirs(dirname)\n fd = open(relative_path, \"w\")\n fd.write(z.read(path))\n fd.close()\n except IOError, e:\n raise e\n return output_directory", "def unzip(file_loc, extract_loc=None):\n try:\n with zipfile.ZipFile(\n file_loc, \"r\"\n ) as file: # opening the zip file using 'zipfile.ZipFile' class\n print(\"Ok\")\n # ZipFile.infolist() returns a list containing all the members of an archive file\n print(file.infolist())\n\n # ZipFile.namelist() returns a list containing all the members with names of an archive file\n print(file.namelist())\n\n # ZipFile.getinfo(path = filepath) returns the information about a member of Zip file.\n # It raises a KeyError if it doesn't contain the mentioned file\n print(file.getinfo(file.namelist()[-1]))\n\n # If extraction directory not given, extracted to 'data/processed/file_name'\n if extract_loc == None:\n base = os.path.dirname(file_loc)\n folder_name = os.path.basename(base)\n extract_loc = \"data/processed/\" + folder_name\n\n # ZipFile.extractall(path = filepath, pwd = password) extracts all\n # the files to current directory\n file.extractall(path=extract_loc)\n # after executing check the directory to see extracted files\n\n except zipfile.BadZipFile: # if the zip file has any errors then it prints the\n # error message which you wrote under the 'except' block\n print(\"Error: Zip file is corrupted\")\n\n except zipfile.LargeZipFile:\n print(\"Error: File size if too large\") # if the file size is too large to\n # open it prints the error you have written\n except FileNotFoundError:\n print(\"Error: File not found\")", "def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = 
urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')", "def unzip_file(zipfile_path, target_dir, touchfile_path):\r\n with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:\r\n zip_ref.extractall(target_dir)\r\n\r\n with open(touchfile_path, 'w') as touchfile:\r\n touchfile.write(f'unzipped {zipfile_path}')", "def extract_zip(dataset_filepath, temp_dir):\n with zipfile.ZipFile(dataset_filepath, 'r') as zip_ref:\n zip_ref.extractall(temp_dir)\n\n return os.path.join(temp_dir, os.listdir(temp_dir)[0])", "def prepare(self, location):\n if os.path.isdir(location):\n return location\n else:\n extracted = 0\n tempdir = tempfile.mkdtemp()\n # Maybe it is file or something on http://...\n if os.path.isfile(location):\n fp = open(location, 'r')\n tar = tarfile.open(fileobj=fp, mode='r:*')\n else:\n fp = requests.get(location)\n tar = tarfile.open(fileobj=io.BytesIO(fp.content), mode='r:*')\n # Unzip only files interesting for use. These are markers only now\n for member in tar.getmembers():\n for marker in MARKERS.keys():\n if member.name.endswith(marker):\n tar.extract(member, tempdir)\n extracted += 1\n # If we have extracted succesfully, return directory location\n if extracted > 0:\n return tempdir\n else:\n raise Exception(\"Failed to extract expected files for '%s'\" % location)", "def Extract_zip_file (path_to_zip,dir_to_save_into):\n with zipfile.ZipFile(path_to_zip) as zf:\n \n for member in tqdm(zf.namelist(), desc='Extracting'):\n try:\n if ('annotations' in member) and (member.endswith('.json')): \n zf.extract(member, dir_to_save_into)\n shutil.move(os.path.join(dir_to_save_into,member),dir_to_save_into)\n if ('train' in member):\n zf.extract(member, dir_to_save_into)\n if ('test' in member):\n zf.extract(member, dir_to_save_into)\n if ('val' in member):\n zf.extract(member, dir_to_save_into)\n except zipfile.error as e:\n pass\n\n #delete zip\n os.remove(path_to_zip)\n if(os.path.isdir(os.path.join(dir_to_save_into,'annotations'))):\n # remove the tmp annotations directory\n shutil.rmtree(os.path.join(dir_to_save_into,'annotations'))", "def UnzipFilenameToDir(filename, dir):\n zf = zipfile.ZipFile(filename)\n pushd = os.getcwd()\n if not os.path.isdir(dir):\n os.mkdir(dir)\n os.chdir(dir)\n # Extract files.\n for info in zf.infolist():\n name = info.filename\n if name.endswith('/'): # dir\n if not os.path.isdir(name):\n os.makedirs(name)\n else: # file\n dir = os.path.dirname(name)\n if not os.path.isdir(dir):\n os.makedirs(dir)\n out = open(name, 'wb')\n out.write(zf.read(name))\n out.close()\n # Set permissions. 
Permission info in external_attr is shifted 16 bits.\n os.chmod(name, info.external_attr >> 16L)\n os.chdir(pushd)", "def extract_zip(filename, target_dir, logger=None):\n logger = logger or Logger()\n import zipfile\n logger.log(\"Extracting from zip archive %s\" % filename, level=1)\n zip_archive = None\n try:\n zip_archive = zipfile.ZipFile(open(filename, mode='r'))\n member_names = zip_archive.namelist()\n # manually extract files since extractall is only in python 2.6+\n# zip_archive.extractall(target_dir)\n for f in member_names:\n if f.endswith('/'):\n dst = \"%s/%s\" % (target_dir, f)\n mkdir_p(dst)\n for f in member_names:\n if not f.endswith('/'):\n path = \"%s/%s\" % (target_dir, f)\n with open(path, 'wb') as dest_file:\n dest_file.write(zip_archive.read(f))\n return member_names\n finally:\n if zip_archive is not None:\n zip_archive.close()", "def unzip(f, targetdir):\n import zipfile\n\n with zipfile.ZipFile(f, \"r\") as zip_ref:\n zip_ref.extractall(targetdir)", "def test_unzip_file(self):\n\n # Path to the compressed file\n zipped_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.zip\")\n # Test for correct data\n # NOTE : For this test case to pass the source xml zipped file\n # should be present in the download path\n self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))\n\n # Test for wrong target path\n self.assertFalse(unzip_file(zipped_file, r\"D:\\kqcA CK j \"))\n\n # Test for incorrect compressed file\n self.assertFalse(unzip_file(\"D:\\somerandomfile\", self.xmlfilepath))", "def _unzip_files(self) -> None:\n for file in self.input_path.iterdir():\n if is_zipfile(file):\n with ZipFile(file, mode=\"r\") as archive:\n archive.extractall(path=self.temp_path)", "def main(self, line):\n # filename with optional destination\n args = bash(line)\n if args is None:\n return\n elif not (1 <= len(args) <= 2):\n print \"unzip: Usage: unzip file [destination]\"\n else:\n filename = os.path.abspath(args[0])\n if not os.path.isfile(filename):\n print \"unzip: %s: No such file\" % args[0]\n else:\n # PK magic marker check\n f = open(filename)\n try:\n pk_check = f.read(2)\n except Exception:\n pk_check = ''\n finally:\n f.close()\n if pk_check != 'PK':\n print \"unzip: %s: does not appear to be a zip file\" % args[0]\n else:\n if (os.path.basename(filename).lower().endswith('.zip')):\n altpath = os.path.splitext(os.path.basename(filename))[0]\n else:\n altpath = os.path.basename(filename) + '_unzipped'\n altpath = os.path.join(os.path.dirname(filename), altpath)\n location = (args[1:2] or [altpath])[0]\n if (os.path.exists(location)) and not (os.path.isdir(location)):\n print \"unzip: %s: destination is not a directory\" % location\n return\n elif not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zipf = zipfile.ZipFile(zipfp)\n # check for a leading directory common to all files and remove it\n dirnames = [os.path.join(os.path.dirname(x), '') for x in zipf.namelist()]\n common_dir = os.path.commonprefix(dirnames or ['/'])\n # Check to make sure there aren't 2 or more sub directories with the same prefix\n if not common_dir.endswith('/'):\n common_dir = os.path.join(os.path.dirname(common_dir), '')\n for name in zipf.namelist():\n data = zipf.read(name)\n fn = name\n if common_dir:\n if fn.startswith(common_dir):\n fn = fn.split(common_dir, 1)[-1]\n elif fn.startswith('/' + common_dir):\n fn = fn.split('/' + common_dir, 1)[-1]\n fn = fn.lstrip('/')\n fn = os.path.join(location, fn)\n dirf = os.path.dirname(fn)\n if not 
os.path.exists(dirf):\n os.makedirs(dirf)\n if fn.endswith('/'):\n # A directory\n if not os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n except Exception:\n zipfp.close()\n print \"unzip: %s: zip file is corrupt\" % args[0]\n return\n finally:\n zipfp.close()", "def unzip_archive(archive):\n tmpdir = os.path.join(tempfile.gettempdir(),\n os.path.basename(archive))\n assert tmpdir != archive # That wouldn't work out\n\n if os.path.exists(tmpdir):\n # files are already extracted\n pass\n else:\n if tarfile.is_tarfile(archive):\n print 'Extracting tarfile ...'\n with tarfile.open(archive) as tf:\n tf.extractall(path=tmpdir)\n elif zipfile.is_zipfile(archive):\n print 'Extracting zipfile ...'\n with zipfile.ZipFile(archive) as zf:\n zf.extractall(path=tmpdir)\n else:\n raise ValueError('Unknown file type for %s' % os.path.basename(archive))\n return tmpdir", "def unzip(local_zip: str, extract_dir: str, pwd: str = None):\n def get_zipinfo_datetime(zipmember: zipfile.ZipInfo) -> datetime:\n zt = zipmember.date_time # tuple: year, month, day, hour, min, sec\n # ZIP uses localtime\n return datetime(zt[0], zt[1], zt[2], zt[3], zt[4], zt[5], tzinfo=tz.tzlocal())\n\n def has_file_changed(zipmember: zipfile.ZipInfo, dst_path):\n st: os.stat_result = None\n try:\n st = os.stat(dst_path, follow_symlinks=False)\n if st.st_size != zipmember.file_size:\n return True\n dst_mtime: datetime = datetime.fromtimestamp(st.st_mtime, tz=tz.tzlocal())\n src_mtime = get_zipinfo_datetime(zipmember)\n if dst_mtime != src_mtime:\n return True\n except (OSError, ValueError):\n return True # does not exist\n return False\n\n def make_symlink(zipmember: zipfile.ZipInfo, symlink_location, is_directory):\n target = zip.read(zipmember, pwd=pwd).decode('utf-8')\n if os.path.lexists(symlink_location):\n os.remove(symlink_location)\n os.symlink(target, symlink_location, target_is_directory=is_directory)\n\n unzipped_files: List[Tuple[zipfile.ZipFile, str]] = []\n\n with zipfile.ZipFile(local_zip, \"r\") as zip:\n for zipmember in zip.infolist():\n dst_path = os.path.normpath(os.path.join(extract_dir, zipmember.filename))\n mode = zipmember.external_attr >> 16\n is_symlink = stat.S_ISLNK(mode)\n #what = 'DIR' if zipmember.is_dir() else 'FILE'\n #what = what + ' LINK' if is_symlink else what\n #print(f'{what} {zipmember.filename} S_IMODE={stat.S_IMODE(mode):0o} S_IFMT={stat.S_IFMT(mode):0o}')\n if zipmember.is_dir(): # make dirs if needed\n if is_symlink:\n make_symlink(zipmember, dst_path, is_directory=True)\n else:\n os.makedirs(dst_path, exist_ok=True)\n elif has_file_changed(zipmember, dst_path): # only extract if file appears to be modified\n unzipped_files.append((zipmember, dst_path))\n if is_symlink:\n make_symlink(zipmember, dst_path, is_directory=False)\n else:\n with zip.open(zipmember, pwd=pwd) as src, open(dst_path, \"wb\") as dst:\n shutil.copyfileobj(src, dst)\n for zipmember, dst_path in unzipped_files:\n # set the correct permissions for files and folders\n perm = stat.S_IMODE(zipmember.external_attr >> 16)\n os.chmod(dst_path, perm)\n # always set the modification date from the zipmember timestamp,\n # this way we can avoid unnecessarily modifying files and causing full rebuilds\n time = get_zipinfo_datetime(zipmember)\n #print(f' | {dst_path} {time}')\n mtime = time.timestamp()\n if System.windows:\n os.utime(dst_path, times=(mtime, mtime))\n else:\n os.utime(dst_path, times=(mtime, mtime), follow_symlinks=False)\n\n return 
len(unzipped_files)", "def unzip(zip_path, output_file, data_folder):\n\n print('Unzipping file: {}'.format(zip_path))\n pyunpack.Archive(zip_path).extractall(data_folder)\n\n # Checks if unzip was successful\n if not os.path.exists(output_file):\n raise ValueError(\n 'Error in unzipping process! {} not found.'.format(output_file))", "def dir_tester_unzip_tmp():\n return abspath('tmpunzip')", "def unzipper(data_address, target_directory):\n import zipfile\n data = \"/home/sharoonsaxena/Datasets/dogs-vs-cats.zip\"\n zip_ref = zipfile.ZipFile(data, \"r\")\n zip_ref.extractall(\"/home/sharoonsaxena/Datasets/extracted/\")\n zip_ref.close()", "def unzip_data(zip_f,data_folder_path): \n\n with zipfile.ZipFile(zip_f,\"r\") as zip_ref:\n zip_ref.extractall(data_folder_path)", "def file_unzipper(directory):\n debug.log(\"Unzipping directory (%s)...\"%directory)\n #FINDING AND UNZIPPING ZIPPED FILES\n for root, dirs, files in os.walk(directory, topdown=False):\n if root != \"\":\n orig_dir = os.getcwd()\n os.chdir(directory)\n Popen('gunzip -q -f *.gz > /dev/null 2>&1', shell=True).wait()\n Popen('unzip -qq -o \"*.zip\" > /dev/null 2>&1', shell=True).wait()\n Popen('rm -f *.zip > /dev/null 2>&1', shell=True).wait()\n os.chdir(orig_dir)", "def _extract_zip(src, dst):\n # check if src is a valid .zip\n assert zipfile.is_zipfile(src), \"{} is not a valid .zip file.\".format(src)\n\n zip_file = zipfile.ZipFile(src, \"r\")\n for file in zip_file.namelist():\n zip_file.extract(file, dst)", "def ZipResultFolder(md5_key, cnt):\n subfoldername = md5_key[:2]\n md5_subfolder = \"%s/%s\"%(path_cache, subfoldername)\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n zipfile_cache = cachedir + \".zip\"\n if os.path.exists(cachedir) and not os.path.exists(zipfile_cache):\n origpath = os.getcwd()\n os.chdir(md5_subfolder)\n targetfile = os.path.join(cachedir, \"query.result.txt\")\n if os.path.exists(targetfile):\n cmd = [\"zip\", \"-rq\", \"%s.zip\"%(md5_key), md5_key]\n cmdline = \" \".join(cmd)\n try:\n print((\"%d: %s\"%(cnt, cmdline)))\n subprocess.check_call(cmd)\n print((\"%d: %s\"%(cnt, \"rmtree(%s)\"%(md5_key) )))\n os.system(\"chown %s:%s %s\"%(user, group, \"%s.zip\"%(md5_key)))\n shutil.rmtree(md5_key)\n except:\n print(\"Failed to zip folder %s\"%(cachedir), file=sys.stderr)\n raise\n else:\n print((\"%d: %s\"%(cnt, \"bad result! just rmtree(%s)\"%(md5_key) )))\n shutil.rmtree(md5_key)\n os.chdir(origpath)\n elif os.path.exists(zipfile_cache):\n #check weather the zipped file is a valid prediction result\n try:\n with ZipFile(zipfile_cache, \"rb\") as myzip:\n li = myzip.namelist()\n target = \"%s/query.result.txt\"%(md5_key)\n if target in li:\n print((\"%d: %s\"%(cnt, \"Valid zipped result for %s\"%(md5_key) )))\n else:\n print((\"%d: %s\"%(cnt, \"bad zipped result! just delete zipfile(%s)\"%(md5_key) )))\n os.remove(zipfile_cache)\n except Exception as e:\n print((\"%d: %s\"%(cnt, \"BadZipFile! just delete zipfile(%s)\"%(md5_key) )))\n os.remove(zipfile_cache)", "def mass_extract(source_directory, target_directory):\n\n import os\n import ZipFile\n\n source_directory = raw_input(\"Where are the zips? \")\n target_directory = raw_input(\"To where do you want to extract the files? \")\n \n if not os.path.exists(source_directory):\n print \"Sorry, that folder doesn't seem to exist.\"\n source_directory = raw_input(\"Where are the zips? 
\")\n\n if not os.path.exists(target_directory):\n os.mkdir(target_directory)\n \n for path, directory, filename in os.walk(source_directory):\n zip_file = ZipFile.ZipFile(filenames)\n ZipFile.extract(zip_file, target_directory)\n zip_file.close()\n\n print \"Done.\"", "def extract_zip(zip_path: pathlib.Path, output_path: pathlib.Path, flatten: bool = True) -> None:\n try:\n with zipfile.ZipFile(zip_path) as zf:\n if flatten:\n for member_info in zf.infolist():\n member_name = member_info.filename\n if member_name.startswith('__MACOSX'):\n # Ignore Mac OS X metadata\n continue\n\n member_base_name = os.path.basename(member_name)\n if not member_base_name:\n # Skip directories\n continue\n\n member_output_path = output_path / member_base_name\n\n with zf.open(member_info) as input_stream, member_output_path.open(\n 'wb'\n ) as output_stream:\n shutil.copyfileobj(input_stream, output_stream)\n else:\n zf.extractall(output_path)\n except zipfile.BadZipfile as e:\n raise ScoreError(f'Could not read ZIP file \"{zip_path.name}\": {str(e)}.')", "def unzip(zip_file, dest_folder):\n zip = zipfile.ZipFile(zip_file, 'r')\n if os.path.exists(dest_folder):\n pass\n else:\n os.makedirs(dest_folder)\n if dest_folder[-1] != '/':\n dest_folder += '/'\n for filename in zip.namelist():\n # --- Folder?\n if filename.endswith('/'):\n if os.path.exists(join(abspath(dest_folder),filename)):\n pass\n else:\n os.makedirs(join(abspath(dest_folder),filename))\n else:\n try:\n os.makedirs(normpath((abspath(dest_folder)+'/'+dirname(filename))))\n try:\n bytes = zip.read(filename)\n #print 'Unzipping file:', filename, 'with', len(bytes), 'bytes..'\n file((join(dest_folder,filename)), 'wb').write(zip.read(filename))\n accesstime = time.time()\n timeTuple=(int(zip.getinfo(filename).date_time[0]),\\\n int(zip.getinfo(filename).date_time[1]),\\\n int(zip.getinfo(filename).date_time[2]),\\\n int(zip.getinfo(filename).date_time[3]) ,\\\n int(zip.getinfo(filename).date_time[4]),\\\n int(zip.getinfo(filename).date_time[5]),\\\n int(0),int(0),int(0))\n modifiedtime = mktime(timeTuple)\n utime((join(dest_folder,filename)), (accesstime,modifiedtime))\n except IOError:\n pass\n except:\n if os.path.exists(normpath((abspath(dest_folder)+'/'+dirname(filename)))):\n try:\n bytes = zip.read(filename)\n #print 'Unzipping file:', filename, 'with', len(bytes), 'bytes..'\n file((join(dest_folder,filename)), 'wb').write(zip.read(filename))\n accesstime = time.time()\n timeTuple=(int(zip.getinfo(filename).date_time[0]),\\\n int(zip.getinfo(filename).date_time[1]),\\\n int(zip.getinfo(filename).date_time[2]),\\\n int(zip.getinfo(filename).date_time[3]) ,\\\n int(zip.getinfo(filename).date_time[4]),\\\n int(zip.getinfo(filename).date_time[5]),\\\n int(0),int(0),int(0))\n modifiedtime = mktime(timeTuple)\n utime((join(dest_folder,filename)), (accesstime,modifiedtime))\n except IOError:\n pass\n else:\n os.makedirs(normpath((abspath(dest_folder)+'/'+dirname(filename))))\n zip.close", "def extract(fspec: pathlib.Path, dspec: pathlib.Path) -> bool:\n try:\n os.makedirs(str(dspec))\n except FileExistsError:\n pass\n try:\n with zipfile.ZipFile(str(fspec), \"r\") as f:\n f.extractall(str(dspec))\n return True\n except Exception: # noqa\n return False", "def _unzip(filename, branch=None):\n try:\n file = zipfile.ZipFile(filename)\n basename = os.path.dirname(filename)\n basename = basename.replace(\".zip\", \"\")\n file.extractall(path=basename)\n return basename, filename\n except Exception as e:\n six.print_(e)", "def unzip_first(input_filename: 
str, extract_dir: str) -> str:\n with zipfile.ZipFile(input_filename) as zip_file:\n zip_file_list = zip_file.infolist()\n zip_index = 0\n while zip_index < len(zip_file_list) and zip_file_list[zip_index].is_dir():\n zip_index += 1\n if zip_index == len(zip_file_list):\n res = ''\n else:\n file_to_extract = zip_file_list[zip_index]\n zip_file.extract(file_to_extract, extract_dir)\n res = os.path.join(extract_dir, file_to_extract.filename)\n return res", "def unzip_data():\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()", "def unzip_all(input_file: pathlib.Path) -> Tuple[pathlib.Path, tempfile.TemporaryDirectory]:\n output_temp_dir = tempfile.TemporaryDirectory()\n output_path = pathlib.Path(output_temp_dir.name)\n\n extract_zip(input_file, output_path)\n\n return output_path, output_temp_dir", "def zipdir(path, zippath):\n execStr = ['zip', '-r',zippath, path]\n print ' '.join(execStr)\n proc = subprocess.Popen(execStr, stdout=PIPE, stderr=PIPE)\n (output, error) = proc.communicate()\n if error: print 'error: ', error\n print 'output: ', output", "def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:\n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(dir_to_extract_to)\n return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'", "def zip_single(source_path, destination_path, password):\n\n if not destination_path:\n destination_path = source_path\n\n if not destination_path.endswith(\".zip\"):\n suffix = destination_path.split(\".\")[1]\n if destination_path.endswith(\".\" + suffix):\n destination_path = destination_path.replace(\".\" + suffix, '')\n destination_path += \".zip\"\n\n if password:\n z = pyzipper.AESZipFile(destination_path, 'w', compression=pyzipper.ZIP_LZMA, encryption=pyzipper.WZ_AES)\n z.setpassword(password)\n else:\n z = pyzipper.ZipFile(destination_path, 'w', compression=pyzipper.ZIP_LZMA)\n\n try:\n z.write(source_path)\n print(f\"{destination_path} created successfully.\")\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)\n finally:\n z.close()", "def _zipfile_single_file_extract_worker(\n zip_file_path: Path,\n file_in_archive: zipfile.ZipInfo,\n destination_folder: Path,\n is_dir: bool,\n) -> Path:\n with _FastZipFileReader(zip_file_path) as zf:\n # assemble destination and ensure it exits\n destination_path = destination_folder / file_in_archive.filename\n\n if is_dir:\n destination_path.mkdir(parents=True, exist_ok=True)\n return destination_path\n desc = f\"decompressing {zip_file_path}:{file_in_archive.filename} -> {destination_path}\\n\"\n with zf.open(name=file_in_archive) as zip_fp, destination_path.open(\n \"wb\"\n ) as dest_fp, tqdm_logging_redirect(\n total=file_in_archive.file_size,\n desc=desc,\n **(\n _TQDM_FILE_OPTIONS\n | dict(miniters=_compute_tqdm_miniters(file_in_archive.file_size))\n ),\n ) as pbar:\n while chunk := zip_fp.read(_CHUNK_SIZE):\n dest_fp.write(chunk)\n pbar.update(len(chunk))\n return destination_path", "def unzip(zipped_file, output_directory=None,\n prefix=\"apsharvest_unzip_\", suffix=\"\"):\n if not output_directory:\n # We create a temporary directory to extract our stuff in\n try:\n output_directory = mkdtemp(suffix=suffix,\n prefix=prefix,\n dir=os.path.join(CFG_TMPSHAREDDIR, 'apsharvest'))\n except Exception, e:\n try:\n os.removedirs(output_directory)\n except TypeError:\n pass\n raise e\n return _do_unzip(zipped_file, output_directory)", "def decryptDir(cipFilename, key):\n\timport zipfile\n\tfrom os 
import remove\n\tdecryptFile(cipFilename, key)\n\tzipFilename = normalizePath(getDirectoryFilename(cipFilename) + \"\\\\\" + getName(cipFilename) + \".zip\")\n\tunzip = zipfile.ZipFile(zipFilename, 'r')\n\tunzip.extractall(getDirectoryFilename(cipFilename))\n\tunzip.close()\n\tremove(zipFilename)", "def _check_zip_file (filename, path_unzip, outfile) :\n assert path_unzip is not None\n file,ext = os.path.splitext (filename)\n ext = ext.lower ()\n if ext == \".gz\" :\n \n import gzip\n \n if outfile is None :\n dest = filename.split (\"!\")\n dest = dest [ len(dest)-1 ]\n ext = os.path.splitext (dest) [1]\n dest = dest.replace (ext, \".txt\")\n path = os.path.split (filename)\n path = \"/\".join (path [:len (path)-1])\n dest = path + \"/\" + dest\n else :\n dest = outfile\n \n if not os.path.exists (dest) :\n file = gzip.GzipFile (filename, \"r\")\n if outfile is None :\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n \n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n fLOG (\"ungzipping file (already done)\", dest)\n return dest\n \n fLOG (\"ungzipping file\", dest)\n f = open (dest, \"w\")\n data = file.read (2**27)\n size = 0\n while len (data) > 0 :\n size += len (data)\n fLOG (\"ungzipping \", size, \"bytes\")\n if isinstance (data, bytes) : f.write (bytes.decode (data))\n else : f.write (data)\n data = file.read (2**27)\n f.close ()\n file.close ()\n \n return dest\n \n if ext == \".zip\" :\n \n import zipfile\n try :\n file = zipfile.ZipFile (filename, \"r\")\n except Exception as e :\n fLOG (\"problem with \", filename)\n raise e\n \n if len (file.infolist()) != 1:\n if outfile is not None :\n raise PQHException (\"the archive contains %d files and not one as you expected by filling outfile\" % len (file.infolist()))\n fLOG (\"unzip file (multiple) \", filename)\n #message = \"\\n\".join ([ fi.filename for fi in file.infolist() ] )\n #raise Exception.YstException(\"ColumnInfoSet.load_from_file: file %s contains no file or more than one file\\n\" + message)\n folder = os.path.split (filename) [0]\n todo = 0\n _zip7_path = r\"c:\\Program Files\\7-Zip\"\n zip7 = os.path.exists (_zip7_path)\n wait = [ ]\n for info in file.infolist () :\n fileinside = info.filename\n dest = os.path.join (folder, fileinside)\n if not os.path.exists (dest) :\n fol = os.path.split (dest) [0]\n if not os.path.exists (fol) : os.makedirs (fol)\n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n continue\n \n if not sys.platform.startswith(\"win\") or not zip7 :\n data = file.read (fileinside)\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n fLOG (\"unzipping file\", dest)\n wait.append(dest)\n f = open (dest, \"w\")\n if isinstance (data, bytes) :\n f.write (str (data))\n else :\n f.write (data)\n f.close ()\n else :\n todo += 1\n \n if todo > 0 and zip7 :\n dest = os.path.realpath (path_unzip)\n cmd = '\"' + _zip7_path + '\\\\7z.exe\" e -y -o\"%s\" \"%s\"' % (dest, os.path.realpath (filename)) \n out,err = run_cmd (cmd, wait = True)\n if len (err) > 0 : raise PQHException (\"command {0} failed\\n{1}\".format(cmd,err))\n if \"Error\" in out : raise PQHException (\"command {0} failed\\n{1}\".format(cmd,out))\n else :\n dest = path_unzip\n \n file.close ()\n \n ch = False\n while not 
ch :\n ch = True\n for a in wait :\n if not os.path.exists(a) : \n ch = False\n break\n time.sleep(0.5)\n \n return dest\n \n else :\n for info in file.infolist () :\n fileinside = info.filename\n \n path = os.path.split (filename)\n dest = outfile if outfile is not None else path [0] + \"/\" + fileinside\n if not os.path.exists (dest) :\n data = file.read (fileinside)\n if outfile is None :\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n \n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n fLOG(\"unzipping one file (already done)\", dest)\n return dest\n \n fLOG(\"unzipping one file\", dest)\n f = open (dest, \"w\")\n if isinstance (data, bytes) :\n f.write (bytes.decode (data))\n else :\n f.write (data)\n f.close ()\n file.close ()\n return dest\n \n return filename", "def unpack(input_filename, extract_dir):\n if not is_archive_file(input_filename):\n raise AttributeError(\"Input_filename must be an archive (ex: .tar.gz, .zip)\")\n if zipfile.is_zipfile(input_filename):\n unzip(input_filename, extract_dir)\n else:\n untar(input_filename, extract_dir)", "def extract_zip(dataset_path, target_path):\n dataset_path = os.path.join(dataset_path,'covidx-cxr2.zip')\n print(f'Extracting zip file: {dataset_path}')\n with ZipFile(file=dataset_path) as zip_file:\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n zip_file.extract(member=file, path=os.path.join(target_path, 'xray'))\n os.remove(dataset_path)", "def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()", "def unpack_archive(\n filepath: types.PathLike, *, extract_dir: Optional[types.PathLike] = None\n) -> types.PathLike:\n filepath = utils.to_path(filepath).resolve()\n if not extract_dir:\n extract_dir = str(filepath.parent)\n filepath = str(filepath)\n os.makedirs(extract_dir, exist_ok=True)\n is_zipfile = zipfile.is_zipfile(filepath)\n is_tarfile = tarfile.is_tarfile(filepath)\n if not is_zipfile and not is_tarfile:\n LOGGER.debug(\"'%s' is not an archive\", filepath)\n return extract_dir\n else:\n LOGGER.info(\"extracting data from archive file '%s'\", filepath)\n shutil.unpack_archive(filepath, extract_dir=extract_dir, format=None)\n # we want to rename the unpacked directory to a consistent value\n # unfortunately, shutil doesn't pass this back to us\n # so, we get the root path of all the constituent members\n if is_zipfile:\n with zipfile.ZipFile(filepath, mode=\"r\") as zf:\n members = zf.namelist()\n else:\n with tarfile.open(filepath, mode=\"r\") as tf:\n members = tf.getnames()\n src_basename = os.path.commonpath(members)\n dest_basename = os.path.basename(filepath)\n if src_basename:\n while True:\n tmp, _ = 
os.path.splitext(dest_basename)\n if tmp == dest_basename:\n break\n else:\n dest_basename = tmp\n if src_basename != dest_basename:\n return shutil.move(\n os.path.join(extract_dir, src_basename),\n os.path.join(extract_dir, dest_basename),\n )\n else:\n return os.path.join(extract_dir, src_basename)\n else:\n return extract_dir", "def download_and_unzip(url, extract_to='.'):\n http_response = urlopen(url)\n zipfile = ZipFile(BytesIO(http_response.read()))\n zipfile.extractall(path=extract_to)", "def unzip_citibike_data(zip_dir):\n# zip_dir = \"data/citibike-tripdata-nyc/\"\n# csv_dir = \"data/citibike-tripdata-nyc/csv\"\n extension = \".zip\"\n\n # for each zip file in zip_dir extract data\n for item in os.listdir(zip_dir):\n if item.endswith(extension):\n\n # create zipfile object and extract\n file_name = zip_dir + item\n with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n zip_ref.extractall(zip_dir)\n print(item + \" done\")", "def download_extract(name, folder=None): #@save\n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir", "def _unzip(save_path, _, database_name, data_path):\r\n print('Extracting {}...'.format(database_name))\r\n with zipfile.ZipFile(save_path) as zf:\r\n zf.extractall(data_path)", "def unzip_single_file(zip_file_name, output_file_name):\n if not os.path.isfile(output_file_name):\n with open(output_file_name, 'wb') as out_file:\n with zipfile.ZipFile(zip_file_name) as zipped:\n for info in zipped.infolist():\n if output_file_name in info.filename:\n with zipped.open(info) as requested_file:\n out_file.write(requested_file.read())\n return", "def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)", "def unzip(file: pathlib.Path, directory: pathlib.Path, quiet_ignore: bool = False):\n if type(directory) is not pathlib.Path:\n directory = pathlib.Path(directory)\n directory.mkdir(parents=True, exist_ok=True)\n if directory.is_dir() and not any(directory.iterdir()):\n with zipfile.ZipFile(file, 'r') as zip_ref:\n zip_ref.extractall(directory)\n else:\n if not quiet_ignore:\n raise EnvironmentError('Target extraction directory is not empty, no files extracted.')", "def download_extract(name, folder=None):\n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir", "def unzip(zip_path, cleanup=False):\n zfile = zipfile.ZipFile(zip_path, 'r')\n zfile.extractall(os.path.dirname(zip_path))\n zfile.close()\n if cleanup:\n os.remove(zip_path)", "def unzip_nested_zip(dataset_zip, path_unzip):\r\n\r\n with zipfile.ZipFile(dataset_zip, \"r\") as zfile:\r\n try:\r\n zfile.extractall(path=path_unzip)\r\n except OSError as e:\r\n logging.warning(\r\n \"Please check the unzipped files manually. 
There may be some missed important files.\"\r\n )\r\n logging.warning(\"The directory is: \" + path_unzip)\r\n for root, dirs, files in os.walk(path_unzip):\r\n for filename in files:\r\n if re.search(r\"\\.zip$\", filename):\r\n file_spec = os.path.join(root, filename)\r\n new_dir = os.path.join(root, filename[0:-4])\r\n unzip_nested_zip(file_spec, new_dir)", "def unzip(self):\n try:\n zipped_files = [f for f in os.listdir('.') if os.path.isfile(f)]\n for file in zipped_files:\n unzipped = self.unzip_file(file)\n if not unzipped:\n logging.error('Failed to unzip {}'.format(file))\n return False\n all_files = [f for f in os.listdir('.') if os.path.isfile(f)]\n for f in all_files:\n os.chmod(f, self.file_mode)\n except Exception as e:\n logging.error('Failed to unzip and change file mode {}'\n .format(e))\n return False\n return True", "def _unzip_archive(archive_path, target_directory, source_path=None, **_):\n\n # Create a temporary directory.\n # Create a zip archive object.\n # Extract the object.\n ctx.logger.debug('Unzipping {src} to {dst}.'.format(\n src=archive_path, dst=target_directory))\n\n src = unzip_archive(archive_path, skip_parent_directory=False)\n copy_directory(src, target_directory)\n remove_dir(src)\n return target_directory", "def download_and_extract_to_mkdtemp(\n bucket: str, key: str, session: Optional[boto3.Session] = None\n) -> str:\n filedes, temp_file = tempfile.mkstemp()\n os.close(filedes)\n download(bucket, key, temp_file, session)\n\n output_dir = tempfile.mkdtemp()\n with zipfile.ZipFile(temp_file, \"r\") as zip_ref:\n zip_ref.extractall(output_dir)\n os.remove(temp_file)\n LOGGER.verbose(\"extracted %s to %s\", temp_file, output_dir)\n return output_dir", "def maybe_extract(filename):\n ext = path.splitext(filename)[1]\n if ext not in EXTRACTORS.keys():\n return None\n # Append the full filepath to the tempdir\n tempdir_root = tempfile.mkdtemp()\n tempdir = path.join(tempdir_root, filename.lstrip('/'))\n os.makedirs(tempdir)\n EXTRACTORS[ext](filename, tempdir)\n rchmod(tempdir_root)\n return tempdir_root", "def _extract_book(local_path, order_hash):\n unzipped_directory = '.'.join((local_path, order_hash, 'uncompressed'))\n\n if not os.path.exists(unzipped_directory):\n os.makedirs(unzipped_directory)\n\n with open(local_path, 'rb') as f:\n zip_file = zipfile.ZipFile(f)\n for name in zip_file.namelist():\n zip_file.extract(name, unzipped_directory)\n\n return unzipped_directory", "def unzip_data(folder, zip_file_basename):\n # path\n filename = os.path.join(folder, zip_file_basename + '.zip')\n new_folder = os.path.join(os.path.dirname(__file__), 'data')\n if not os.path.isdir(new_folder):\n os.mkdir(new_folder)\n # unzip\n if os.path.isfile(os.path.join(new_folder, zip_file_basename + '.csv')):\n return 0\n else:\n zip_file = zipfile.ZipFile(filename, 'r')\n zip_file.extractall(new_folder)\n zip_file.close()\n basename = os.path.join(new_folder, zip_file_basename)\n os.rename(basename + '.txt', basename + '.csv')\n return 1", "def unzip_file(self, filename, location, flatten=True):\n if not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp)\n leading = has_leading_dir(zip.namelist()) and flatten\n for name in zip.namelist():\n data = zip.read(name)\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if not os.path.exists(dir):\n os.makedirs(dir)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n if not 
os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n finally:\n zipfp.close()", "def unzip(filename,destination=None,force=False):\n if not destination:\n destination=os.path.splitext(os.path.basename(filename))[0]\n destination = os.path.join(tmpdir,destination)\n if os.path.exists(destination):\n if force:\n shutil.rmtree(destination)\n else:\n zipname = filename.split('/')[-1]\n zip_url = \"https://geo.colorado.edu/apps/geolibrary/datasets/{0}\".format(zipname)\n if not os.path.isfile(\"/data/static/geolibrary/datasets/{0}\".format(zipname)):\n shutil.copy(filename,\"/data/static/geolibrary/datasets/{0}\".format(zipname))\n os.remove(filename)\n return {\"folder\": destination,\"zipdata\":False,\"zipurl\":zip_url}\n zip_ref = zipfile.ZipFile(filename,'r')\n zip_ref.extractall(destination)\n zipname = filename.split('/')[-1]\n shutil.copy(filename,\"/data/static/geolibrary/datasets/{0}\".format(zipname))\n zip_url = \"https://geo.colorado.edu/apps/geolibrary/datasets/{0}\".format(zipname)\n os.remove(filename)\n return {\"folder\": destination,\"zipdata\":True,\"zipurl\":zip_url}", "def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)", "def sample_input_dir():\n tmpdir = tempfile.mkdtemp()\n input_zip = os.path.join(ASSETS_DIR, 'input_dir.zip')\n with zipfile.ZipFile(input_zip, \"r\") as zip_ref:\n zip_ref.extractall(tmpdir)\n yield tmpdir\n shutil.rmtree(tmpdir)", "def unzip(path, filename_as_folder=False):\n for filename in os.listdir(path):\n if filename.endswith(\".zip\"):\n name = os.path.splitext(os.path.basename(filename))[0]\n if not os.path.isdir(name):\n try:\n file = os.path.join(path, filename)\n zip = ZipFile(file)\n if filename_as_folder:\n directory = os.path.join(path, name)\n os.mkdir(directory)\n print(\"Unzipping {} to {}\".format(filename, directory))\n zip.extractall(directory)\n else:\n print(\"Unzipping {} to {}\".format(filename, path))\n zip.extractall(path)\n except BadZipfile:\n print(\"BAD ZIP: \" + filename)\n try:\n os.remove(file)\n except OSError as e: # this would be \"except OSError, e:\" before Python 2.6\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occured", "def unzip_data(path_to_zipped_data, outdir=None):\n\n config_dir = get_config_dir() if outdir is None else Path(outdir)\n if not config_dir.exists(): config_dir.mkdir(parents=True)\n\n with zipfile.ZipFile(str(path_to_zipped_data), mode='r') as zd:\n zd.extractall(config_dir)\n\n return config_dir", "def unzip_file(zipfilename, unziptodir):\n\tunziptodir = unziptodir.replace(cfg.SEP_DCOMM, cfg.SEP_COMM)\n\tmake_dirs(unziptodir)\n\tzfobj = zipfile.ZipFile(zipfilename)\n\tfor name in zfobj.namelist():\n\t\tname = name.replace(cfg.SEP_DCOMM,cfg.SEP_COMM)\n\t\tif name.endswith(cfg.SEP_COMM):\n\t\t\tos.makedirs(os.path.join(unziptodir, name))\n\t\telse:\n\t\t\text_filename = os.path.join(unziptodir, name)\n\t\t\text_filename = ext_filename.replace(cfg.SEP_DCOMM,cfg.SEP_COMM)\n\t\t\text_dir= os.path.dirname(ext_filename)\n\t\t\tmake_dirs(ext_dir)\n\t\t\toutfile = open(ext_filename, 'wb')\n\t\t\toutfile.write(zfobj.read(name))\n\t\t\toutfile.close()", "def check_extract_zip(p, prefix):\n\n try: # Use zip files if available\n with zipfile.ZipFile(p + prefix + '_nodes.zip', 'r') as zfile:\n zfile.extract(prefix + '_nodes.csv', p)\n with zipfile.ZipFile(p + prefix + '_edges.zip', 'r') as zfile:\n zfile.extract(prefix + '_edges.csv', p)\n 
return True\n except:\n return False", "def extractZipFiles(rootDir, zipDir):\n for root, dirs, files in os.walk(zipDir, topdown=False):\n for name in files:\n \n zipFiles = os.path.join(root, name)\n \n #Check file extension here\n if \".zip\" not in zipFiles:\n continue\n \n else:\n zipPath = zipfile.ZipFile(zipFiles, 'r')\n #print(zipPath) \n \n filesInZip = zipPath.namelist()\n i = 0 \n for i in range(len(filesInZip)):\n #print(filesInZip[i])\n #print(zipPath.getinfo(filesInZip[i]))\n \n if \".mp3\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".m4a\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".mp4\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".png\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".jpg\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n \n elif \".pdf\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n else:\n print(\"No media found in zip file {0}\".format(name))\n \n zipPath.close()", "def file_zipper(root_dir):\n # FINDING AND ZIPPING UNZIPPED FILES\n for root, dirs, files in os.walk(root_dir, topdown=False):\n if root != \"\":\n if root[-1] != '/': root += '/'\n for current_file in files:\n filepath = \"%s/%s\"%(root, current_file)\n try:\n file_size = os.path.getsize(filepath)\n except Exception as e:\n file_size = 0\n debug.log('Error: file_zipper failed to zip following file '+filepath, e)\n # Excluding small files, gzipped files and links\n if ( file_size > 50\n and current_file[-3:] != \".gz\"\n and not os.path.islink(filepath)\n ):\n if current_file[-4:] == \".zip\":\n # Unzip file\n ec = Popen('unzip -qq \"%s\" -d %s > /dev/null 2>&1'%(filepath, root), shell=True).wait()\n if ec > 0:\n debug.log('Error: fileZipper failed to unzip following file %s'%filepath)\n continue\n else:\n ec = Popen('rm -f \"%s\" > /dev/null 2>&1'%(filepath), shell=True).wait()\n if ec > 0: debug.log('Error: fileZipper failed to delete the original zip file (%s)'%filepath)\n filepath = filepath[:-4]\n # Saving a gzipped version\n with open_(filepath, 'rb') as f, open_(filepath+\".gz\", 'wb', 9) as gz:\n gz.writelines(f)\n # Deleting old (non-zipped) file\n try: os.remove(filepath)\n except OSError as e:\n debug.log((\"WARNING! 
The file %s could not be \"\n \"removed!\\n%s\")%(current_file, e))", "def setup(zip_path, dest_path):\n\n #makes folder for zip files\n make_directory(zip_path)\n\n #makes folder for processed data\n make_directory(dest_path)", "def _unpack_archive(self):\n with zipfile.ZipFile(self._archive_full_path, 'r') as zip_ref:\n zip_ref.extractall(self._storage_path)\n\n _logger.debug('Archive has been unpacked.')", "def unzip(path):\n zip_ref = zipfile.ZipFile(path, 'r')\n new_path = path[:-3]\n zip_ref.extractall(new_path)\n zip_ref.close()\n return new_path", "def unzip_file(filename, location, flatten=True):\n if not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp)\n leading = has_leading_dir(zip.namelist()) and flatten\n for name in zip.namelist():\n data = zip.read(name)\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if not os.path.exists(dir):\n os.makedirs(dir)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n if not os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n finally:\n zipfp.close()", "def _extract_archive(path: str, extracted_dir_path: str) -> str:\n logging.info('extracting %s to %s', path, extracted_dir_path)\n with tarfile.open(path) as tar:\n tar.extractall(path=extracted_dir_path)\n extracted_items = os.listdir(extracted_dir_path)\n if len(extracted_items) != 1:\n raise ValueError(\n 'archive at {} did not contain a single directory'.format(path))\n return os.path.join(extracted_dir_path, extracted_items[0])", "def _zip_dir(path):\n file_path = '/tmp/iapydependencies-' + uuid.uuid1().hex + \".zip\"\n _make_archive(_get_dir_entries(path, True), file_path, path[0:path.rfind('/')])\n return file_path" ]
[ "0.73367697", "0.68281114", "0.6682046", "0.66589713", "0.6608535", "0.65792376", "0.65135366", "0.65102696", "0.6478845", "0.64773226", "0.64197737", "0.6360199", "0.6358647", "0.6311184", "0.62914145", "0.6283126", "0.6278718", "0.62752926", "0.62581104", "0.62555015", "0.62402403", "0.6229527", "0.62201023", "0.61923414", "0.6169135", "0.61554027", "0.6123011", "0.6120836", "0.610842", "0.6105338", "0.610482", "0.6083242", "0.6082779", "0.60723805", "0.60583717", "0.6028248", "0.6026998", "0.60207564", "0.6013731", "0.5990208", "0.5989903", "0.59828603", "0.5965646", "0.59614635", "0.5956314", "0.5944252", "0.5926909", "0.5926605", "0.5923835", "0.5917563", "0.5909278", "0.5904137", "0.58829504", "0.5882652", "0.58457845", "0.58147144", "0.58107084", "0.5781188", "0.57781637", "0.57750374", "0.5772523", "0.5757138", "0.5752419", "0.5714615", "0.57075137", "0.57063997", "0.5691084", "0.56886536", "0.5688643", "0.5679853", "0.5638535", "0.561261", "0.5612546", "0.5593713", "0.5582118", "0.55806834", "0.55673957", "0.55672747", "0.5559417", "0.55536115", "0.554812", "0.5544868", "0.5530718", "0.5521787", "0.5519788", "0.5516971", "0.5515932", "0.5511294", "0.54931134", "0.54525757", "0.5448757", "0.5419547", "0.54148644", "0.54090714", "0.54086024", "0.540734", "0.5402238", "0.53867257", "0.5384619", "0.5380378" ]
0.7807078
0
Calculate Profit of Order
def calculate_profit(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profit(self):\n retail_value = 0\n wholesale_value = 0\n for bike in self.sold:\n retail_value += bike.total_cost() + (\n self.retail_margin * bike.total_cost())\n wholesale_value += bike.total_cost()\n return retail_value - wholesale_value", "def profit_per_item(self, pk=None):\n total_profit = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit = total_paid - total_cost\n return total_profit", "def get_profit(self):\n # Profit from previous transactions\n values = [t['value'] for t in self.transactions]\n\n profits = []\n base = None\n for v in values:\n if not base:\n base = v\n profit = v - base\n profits.append(profit)\n base = v\n\n return np.array(profits).sum()\n\n # Get all values to get profit\n #return np.array([ s['value'] for s in self.states ]).mean()", "def potential_profit(self):\n potential_profit = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.total_spent() - _House.broker_fee\n return round(potential_profit, 2)", "def calc_profit(self, assignment):\n return sum([self.profit(agent, task)\n for agent, tasks in assignment.items() \n for task in tasks])", "def envisaged_profit(self):\n profit = round(\n self.calcul_buy_nb_action() * self.take_profit - self.investment_price(),\n 2,\n )\n percent_profit = round(profit * 100 / self.capital, 2)\n return profit, percent_profit", "def maxProfit(self, prices):\r\n\t\tprofit = 0", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def curProfit(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, unitCost, 
coff):\n\treturn curDemand(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, coff) * (curPrice - unitCost)", "def _calc_return(self, order_original, perf_df):\r\n\r\n order = order_original.copy()\r\n no_sec = len(self.perf_data)\r\n price_names = np.array(['price_' + str(i) for i in xrange(1, no_sec + 1)])\r\n ret = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n transaction_cost = 0\r\n\r\n # buy_list vs sell_list contains order bought vs sold that cannot be matched yet to determine the return\r\n # For example when something has been bought, but nothing or not enough has been sold yet, the residue will be\r\n # listed in these lists.\r\n buy_shares = np.zeros((np.shape(order)[0], no_sec))\r\n buy_price = np.zeros((np.shape(order)[0], no_sec))\r\n sell_shares = np.zeros((np.shape(order)[0], no_sec))\r\n sell_price = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n # bl_first vs sl_first indicates which row in buy_list vs sell_list can be used to \"match\" bought/sold shares.\r\n # It automatically points to the oldest row with still outstanding shares. Initial value is -1\r\n # bl_last vs sl_last indicates which row in buy_list vs sell_list can be used to write outstanding shares to.\r\n bl_first = np.ones(no_sec).astype(int) * -1\r\n bl_last = np.zeros(no_sec).astype(int)\r\n sl_first = np.ones(no_sec).astype(int) * -1\r\n sl_last = np.zeros(no_sec).astype(int)\r\n\r\n for ind in range(0, np.shape(order)[0]):\r\n bl_first[(bl_first == -1) & (bl_last > 0)] = 0\r\n sl_first[(sl_first == -1) & (sl_last > 0)] = 0\r\n\r\n # Three situations, per type: buy, sell, nothing\r\n # If nothing, skip to next day\r\n # Only returns made on one day are determined, later they will be accumulated.\r\n\r\n # Situation A.A: Sell order & outstanding buys larger than sell order\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(buy_shares, 0)\r\n share_compl = (share_cumsum < -order[ind, :]) & col_to_change\r\n numb_shares = sum(buy_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += numb_shares * perf_df.loc[ind, price_names[col_to_change]] \\\r\n - sum(buy_shares * buy_price * share_compl, 0)[col_to_change]\r\n buy_shares[share_compl] = 0\r\n bl_first += sum(share_compl)\r\n order[col_to_change] += numb_shares\r\n\r\n ret[ind, col_to_change] += perf_df.loc[ind, price_names[col_to_change]] * -order[ind, col_to_change] * (1 - transaction_cost) \\\r\n - buy_price[bl_first[col_to_change], col_to_change] \\\r\n * -order[ind, col_to_change] * (1 + transaction_cost)\r\n buy_shares[bl_first[col_to_change], col_to_change] += order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation A.B: Sell order & outstanding buys smaller than or equal to sell order\r\n # --> just fill out all outstanding buys, and change order. 
This order will be added to sell list in A.C\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > 0) \\\r\n & (np.sum(buy_shares, 0) <= -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = buy_shares[:, col_to_change]\r\n price_shares = buy_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares, 0) * \\\r\n perf_df.loc[ind, price_names[col_to_change]].values * (1 - transaction_cost) \\\r\n - np.sum(numb_shares * price_shares, 0) * (1 + transaction_cost)\r\n order[ind, col_to_change] += np.sum(numb_shares, 0)\r\n buy_shares[:, col_to_change] = 0\r\n bl_first[col_to_change] = bl_last[col_to_change] - 1\r\n\r\n # Situation A.C: Sell order & no outstanding buys\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n sell_shares[row_to_change, col_to_change] = -order[ind, col_to_change]\r\n sell_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n sl_last[col_to_change] += 1\r\n\r\n # Situation B.A: Buy order & outstanding sells larger than buy order\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) > order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(sell_shares, 0)\r\n share_compl = (share_cumsum < order[ind, :]) & col_to_change\r\n numb_shares = sum(sell_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += sum(sell_shares * sell_price * share_compl, 0)[col_to_change] * (1 - transaction_cost)\\\r\n - numb_shares * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n sell_shares[share_compl] = 0\r\n sl_first += sum(share_compl)\r\n order[col_to_change] += -numb_shares\r\n\r\n ret[ind, col_to_change] += sell_price[sl_first[col_to_change], col_to_change] * order[ind, col_to_change] * (1 - transaction_cost)\\\r\n - perf_df.loc[ind, price_names[col_to_change]] * order[ind, col_to_change] * (1 + transaction_cost)\r\n sell_shares[sl_first[col_to_change], col_to_change] += -order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation B.B: Buy order & outstanding sells smaller than buy order\r\n # --> just fill out all outstanding sells, and change order. 
This order will be added to buy list in B.C\r\n col_to_change = (order[ind, :] > 0) & \\\r\n (np.sum(sell_shares, 0) > 0) & (np.sum(sell_shares, 0) <= order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = sell_shares[:, col_to_change]\r\n price_shares = sell_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares * price_shares, 0) * (1 - transaction_cost) \\\r\n - np.sum(numb_shares, 0) * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n order[ind, col_to_change] += -np.sum(numb_shares, 0)\r\n sell_shares[:, col_to_change] = 0\r\n sl_first[col_to_change] = sl_last[col_to_change] - 1\r\n\r\n # Situation B.C: Buy order & no outstanding sells\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n buy_shares[row_to_change, col_to_change] = order[ind, col_to_change]\r\n buy_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n bl_last[col_to_change] += 1\r\n\r\n ret_abs = np.array([sum(ret[:r]) for r in range(1, len(ret) + 1)])\r\n returns_abs = np.sum(ret_abs, 1)\r\n returns_rel = [i / self.context['max_notional'] + 1 for i in returns_abs]\r\n\r\n return returns_rel, returns_abs, ret_abs", "def expected_policy_profit(targeting_decision, g, observed_profit, prob_treatment):\n return np.sum(((1-targeting_decision) * (1-g) * observed_profit)/(1-prob_treatment) +\\\n (targeting_decision * g * observed_profit)/(prob_treatment))", "def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage", "def totalProfit(name,sortlist, max):\n result= \"go to \"+ name+\" and buy\"\n tp=0\n for i in range(len(sortlist)):\n if sortlist[i][1][2]>0 :\n if sortlist[i][1][0]<=max:\n max= max-sortlist[i][1][0]\n t=(sortlist[i][1][2] * sortlist[i][1][0])\n result= result+\"\\n\"+str(sortlist[i][1][0])+ \" \"+str(sortlist[i][0])+\" for profit of \"+str(t)\n tp= tp+t\n else:\n t=(sortlist[i][1][2]*max)\n result = result+\"\\n\"+ str(max)+\" \"+str(sortlist[i][0])+\" for profit of \"+str(t)\n tp=tp+t\n max=0\n if max==0:\n break\n if tp==0:\n result= result+ \"\\nno profit\"\n return result,tp", "def calculate(self, order):\n pass", "def calculate_profit_pod(location, destination):\n _profit = []\n for key in destination.price_slip.keys():\n if location.price_slip[key] != 0 and destination.price_slip[key] != 0 and location.price_slip[key][1] != 0 and location.price_slip[key][2] != 0:\n benefit = destination.price_slip[key][0] - location.price_slip[key][1]\n _profit.append([f'{benefit:.2f}'])\n else:\n _profit.append([f'0.00'])\n\n return _profit", "def get_contribution(self):\n salary = self._get_salary()\n if not salary:\n return 0\n # Class 1 NIC.\n contribution = 0\n st = 702\n if salary > st:\n contribution = (salary - st) * 0.138\n return contribution", "def curProfitResponse(curPrice, prevPrice, coff):\n\treturn curProfit(curPrice, prevPrice, demandIntcpt, k1, k2, aPrInc, bPrDec, unitCost, coff)", "def determine_profit(self):\n sqrt_delta_sigma = math.sqrt(self.brownian_delta) * self.brownian_sigma\n brownian_motion = nrand.normal(loc=0, scale=sqrt_delta_sigma)\n sigma_pow_mu_delta = (self.drift_mu - 0.5 * math.pow(self.brownian_sigma, 2.0)) * self.brownian_delta\n 
geometric_brownian_motion_log_return = brownian_motion + sigma_pow_mu_delta\n retur = np.exp(geometric_brownian_motion_log_return)\n next_profit = self.profit_history[-1] * retur\n return next_profit", "def findAShin(self):\n #return reduce(lambda x, y: x*y, [self.DoS[key].get_price() for key in self.DoS] )\n a = array([self.DoS[key].get_Price() for key in self.DoS])\n return a.prod()**(1.0/len(a))", "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def maxProfit(self, prices):\n profit = 0\n for i in range(1,len(prices)):\n if prices[i] > prices[i-1]:\n profit += prices[i] - prices[i-1]\n return profit", "def total_profit(knapsack, items, weight):\n return knapsack[items][weight]", "def add_profit(df_gimmes, bet_size):\n df_gimmes['Bet_on_A'] = bet_size * \\\n (df_gimmes['best_ML_B']/100 + 1) / \\\n (df_gimmes['best_ML_A']/100.0 +\n df_gimmes['best_ML_B']/100.0 + 2)\n\n df_gimmes['Bet_on_B'] = bet_size - df_gimmes['Bet_on_A']\n\n df_gimmes['Profit_A'] = df_gimmes['Bet_on_A'] * \\\n df_gimmes['best_ML_A'] / 100.0 + \\\n df_gimmes['Bet_on_A'] - bet_size\n\n df_gimmes['Profit_B'] = df_gimmes['Bet_on_B'] * \\\n df_gimmes['best_ML_B'] / 100.0 + \\\n df_gimmes['Bet_on_B'] - bet_size\n\n return df_gimmes", "def best_ask_price(orders: pandas.DataFrame):\n return best_ask_order(orders).price", "def investment_price(self):\n invest = self.max_loss / (self.buy_price - self.stop_loss) * self.buy_price\n if invest > self.capital:\n return round(self.capital, 2)\n else:\n return round(invest, 2)", "def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False", "def INVITE_COST(sent, isNonProfit=False):\n cost = 0\n if sent > 100:\n cost = 500 # $5\n if sent > 500:\n cost = 1000 # $10\n if sent > 1000:\n cost = 1500 # $15\n if sent > 2000:\n cost = 2000 # $20\n if sent > 10000:\n cost = 2500 # $25\n if isNonProfit:\n cost = cost * .75\n return int(round(cost))", "def mxprofit(array):\n\n #initialize variables\n minimum_val = 10000\n profit = 0\n # edge cases\n if len(array) <= 1:\n return 0\n # iterate through list and store minimum value\n for i in range(len(array)):\n if array[i] < minimum_val:\n minimum_val = array[i]\n # subtract from minimum value and store profit\n for j in array[i:len(array)]:\n if (j-minimum_val) > profit:\n profit = j - minimum_val\n return profit", "def gross_profit():\n sales_revenue = float(input(\"Expected sales revenue: \"))\n cogs = float(input(\"Cost of goods sold: \"))\n gross_profit = sales_revenue - cogs\n print(\"Sales Revenue: {}\\nCOGS: {}\\nGross Profit: {}\".format(sales_revenue, cogs, gross_profit))\n return gross_profit, sales_revenue", "def GetSpeculated(self):\n return self.money + sum([self.share[i] * self.price[i][0] * (1 + self.taxe) for i in self.price])", "def _update_profit(self, cost: float):\n\n if cost > 0:\n self._total_debit += cost\n else:\n self._total_credit += -(cost)", "def before_tax_profit(self):\n\n _before_tax_profit = self.net_revenue() - self.depreciation()\n return _before_tax_profit", "def clear_dividends(self, profit: float):\n for agent in self.agents.values():\n agent.budget += profit / self.n_agents", "def _compute_calculate_cost(self):\n for order in self:\n amount_calculate_cost = 0.0\n for line in order.order_line:\n amount_calculate_cost += (line.product_id.standard_price * line.product_uom_qty)\n order.update({\n 'amount_calculate_cost': amount_calculate_cost\n })", "def 
compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))", "def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))", "def maxProfit(self, prices):\n if not prices:\n return 0\n \n today = 0\n total_profit = 0 \n \n min_price = prices[0]\n \n while today < len(prices):\n if prices[today] < min_price:\n # keep the lowest price\n min_price = prices[today]\n \n tomorrow = today + 1\n if tomorrow >= len(prices): # is the last day?\n if min_price < prices[today]:\n total_profit += prices[today] - min_price\n break\n \n elif prices[tomorrow] < prices[today]: # price going down, we sell out\n if min_price < prices[today]:\n total_profit += (prices[today] - min_price)\n \n min_price = prices[tomorrow] # can not buy today, start from tomorrow\n today = tomorrow + 1\n else: \n today = tomorrow # keep the stock\n \n return total_profit", "def liquidate(self) -> None:\n if self.position.is_close:\n return\n\n if self.position.pnl > 0:\n self.take_profit = self.position.qty, self.price\n else:\n self.stop_loss = self.position.qty, self.price", "def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]", "def calc_fair_profit(self, assignment):\n fair_profit = {t:0 for t in self.tasks}\n for agent, tasks in assignment.items():\n for task in tasks:\n fair_profit[task] += self.profit(agent, task)\n return min(fair_profit.values())", "def cost(self) -> float:", "def fidelity_promo(order: Order) -> float: # <3>\n return order.total() * 0.05 if order.customer.fidelity >= 1000 else 0", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def get_commission(self, price):\n return 2.0 + price * 0.00008", "def find_max_profit(stock_prices,k):\n\teliminated_indices = set()\n\ttotal_profit = 0\n\n\t\n\tfor i in range(0,k):\n\t\tmax_profit = float('-inf')\n\t\tmin_price = float('inf')\n\t\t\n\t\tfor current_index,current_price in enumerate(stock_prices):\n\t\t\t# This condition takes care of note by making sure that \n\t\t\t# prices are not used in previous transaction.\n\t\t\tif current_index not in eliminated_indices:\n\t\t\t\tcurrent_profit = current_price - min_price\n\n\t\t\t\tif (current_profit > 
max_profit):\n\t\t\t\t\tbuying_price_index = min_price_index\n\t\t\t\t\tselling_price_index = current_index\n\t\t\t\t\tmax_profit = current_profit\n\n\t\t\t\t#min_price = min(min_price, current_price)\n\t\t\t\tif (current_price < min_price):\n\t\t\t\t\tmin_price = current_price\n\t\t\t\t\tmin_price_index = current_index\n\n\n\t\t# This for loop is to take care of Note\n\t\tfor i in range(buying_price_index,selling_price_index+1):\n\t\t\teliminated_indices.add(i)\n\n\t\ttotal_profit += max_profit\n\t\tprint('buying_price_index :',buying_price_index)\n\t\tprint(\"selling_price_index :\",selling_price_index)\n\n\treturn total_profit", "def buy_and_pay(self):\n return self.price", "def _compute_amount_qty_delivered(self):\n for line in self:\n # if line.product_id.invoice_policy == 'delivery':\n # qty = line.qty_delivered\n # else:\n # qty = line.product_uom_qty\n # line.price_total_without_discount = qty * line.price_unit\n # line.price_discount = (line.price_total_without_discount * line.discount) / 100\n line.update({\n # 'price_discount': line.price_discount,\n # 'price_total_without_discount': line.price_total_without_discount,\n 'sea_price_total_qty_delivered': line.untaxed_amount_to_invoice + line.untaxed_amount_invoiced,\n })", "def calculate(self) -> float:", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def get_expected_cost(self):", "def incumbant_firm(self, wage):\n \n \n \n # a. demand for capital (capital policy function)\n pol_k = (self.alpha /(self.ret *(1+self.tau_capital)))**((1-self.gamma)/(1-self.gamma-self.alpha)) \\\n * (self.gamma /(wage * (1+self.tau_labor)))**(self.gamma/(1-self.gamma-self.alpha)) \\\n * (self.grid_s_matrix*(1-self.tau_output))**(1/(1-self.alpha-self.gamma))\n \n # b. demand of labor (labor policy function)\n pol_n = (1+self.tau_capital) * self.ret * self.gamma / ((1+self.tau_labor) * wage * self.alpha) * pol_k\n #pol_n = ((smatrix*(1-self.tau_output) * gamma) / wage)**(1/(1-gamma)) * pol_k**(alpha/(1-gamma))\n \n # c. incumbant profit\n pi=(1-self.tau_output) * self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma \\\n - (1+self.tau_labor)* wage * pol_n - (1+self.tau_capital) * self.ret * pol_k - self.cf\n \n # d. 
discounted present value of an incumbent establishment, W(s,pol_k(s,theta))\n W = pi / (1-self.rho)\n \n return pol_k, pol_n, pi, W", "def total_spent(self):\n total_sum = Order.objects.filter(\n email=self.email).aggregate(\n Sum('total_price')\n ).get('total_price__sum')\n return round(total_sum, 4) if total_sum else 0", "def cost(self):\n abs_cost = sum(f['price'] * f['qty'] for f in self.fills)\n return -abs_cost if self.is_ask() else abs_cost", "def cost_volume_profit():\r\n c = float(input(\"Please Enter Total Fixed Costs Value: \"))\r\n a = float(input(\"Please Enter Sale Price Per Unit: \"))\r\n b = float(input(\"Please Enter Variable Cost Per Unit: \"))\r\n ccm = float(a)-float(b)\r\n cuu = float(c)/float(ccm)\r\n ccmr = (float(ccm)/float(a))*float(100)\r\n cda = float(c)/(float(ccmr)/float(100))\r\n print \">> Your Contribution Margin is\",ccm\r\n print \">> Your Breakeven Sales in Units is\",round(cuu)\r\n print \">> Your Contribution Margin Ratio is\",ccmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",cda,\"\\n\"\r\n qq = input(\" Press 1 To Compute Target Profit\\n Press 2 To Compute Margin of Safety\\n Press 3 To Perform Sensitivity Analysis\\n Or Press 0 To Exit: \")\r\n if(qq == 1):\r\n dds = float(input(\"Please Enter Your Target Profit: \"))\r\n xxx = (float(c)+float(dds))/float(ccm)\r\n xxxx = (float(c)+float(dds))/(float(ccmr)/float(100))\r\n print \">> Your Target Profit in Units To Earn\",dds,\"$ is\",round(xxx)\r\n print \">> Your Target Profit in Dollars To Earn\",dds,\"$ is\",xxxx\r\n elif(qq == 0):\r\n print \"Canceled\"\r\n elif(qq == 2):\r\n xc = float(input(\"Please Enter Expected Sales in Units: \"))\r\n zzz = float(xc)-float(cuu)\r\n zzzz = float(zzz)*float(a)\r\n print \">> Your Margin of Safety in Units is\",round(zzz)\r\n print \">> Your Margin of Safety in Dollars is\",zzzz\r\n elif(qq == 3):\r\n i = input(\"Please Enter Total Fixed Costs Value: \")\r\n o = input(\"Please Enter Sale Price Per Unit: \")\r\n p = input(\"Please Enter Variable Cost Per Unit: \")\r\n n = 0\r\n for x,y,z in zip(i,o,p):\r\n cm = float(y)-float(z)\r\n uu = float(x)/float(cm)\r\n cmr = (float(cm)/float(y))*float(100)\r\n da = float(x)/(float(cmr)/float(100))\r\n n += 1\r\n print \"Your Results in Case\",int(n),\"is :\"\r\n print \">> Your Contribution Margin is\",cm\r\n print \">> Your Breakeven Sales in Units is\",round(uu)\r\n print \">> Your Contribution Margin Ratio is\",cmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",da,\"\\n\"\r\n if(cm > ccm):\r\n a = float(cm)-float(ccm)\r\n print \">> Your Contribution Margin Increased by\",a\r\n elif(ccm > cm):\r\n a = float(ccm)-float(cm)\r\n print \">> Your Contribution Margin Decreased by\",a\r\n if(uu > cuu):\r\n b = float(uu)-float(cuu)\r\n print \">> Your Breakeven Sales in Units Increased by\",round(b)\r\n elif(cuu > uu):\r\n b = float(cuu)-float(uu)\r\n print \">> Your Breakeven Sales in Units Decreased by\",round(b)\r\n if(cmr > ccmr):\r\n c = float(cmr)-float(ccmr)\r\n print \">> Your Contribution Margin Ratio Increased by\",c,\"%\"\r\n elif(ccmr > cmr):\r\n c = float(ccmr)-float(cmr)\r\n print \">> Your Contribution Margin Ratio Decreased by\",c,\"%\"\r\n if(da > cda):\r\n d = float(da)-float(cda)\r\n print \">> Your Breakeven Sales in Dollars Increased by\",d\r\n elif(cda > da):\r\n d = float(cda)-float(da)\r\n print \">> Your Breakeven Sales in Dollars Decreased by\",d,\"\\n\"", "def after_tax_profit(self):\n _after_tax_profit = self.before_tax_profit() * (1 - self.tax_rate)\n return 
_after_tax_profit", "def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))", "def max_profit(prices: List[int]) -> int:", "def valuation(self):\n\t\tif self.__tete:\n\t\t\treturn self.__tete.plus_grand().get_coefficient()\n\t\telse:\n\t\t\treturn rationnel()", "def profit_loss(self) -> float:\n return self.net_worth / self.initial_net_worth", "def calculoFitness(self,sumaPoblacion):\n self._funcFitness = self._funcObjetivo / sumaPoblacion", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def total_sales():\n data = []\n orders = Order.objects.all()\n for order in orders:\n data.append(order.get_total_cost())\n return sum(data)", "def _get_toal_sp_(obj):\n \n fTotalSP = 0.0\n for item in obj.order_line:\n fTotalSP += item.price_subtotal\n \n return fTotalSP", "def pro_rata(buy_orders, sell_orders):\n\n \n \"\"\"trade_matrix (numpy.array): matrix for traded shares set buy and sell shares to new amount\"\"\"\n\n if len(buy_orders) == 0 or len(sell_orders) == 0: return np.array([])\n current_buy_orders_length = len(buy_orders)\n current_sell_orders_length = len(sell_orders)\n\n # get total volume of buy\n volume_buy = 0\n for i in range(current_buy_orders_length):\n volume_buy += buy_orders[i].left_quantity\n\n # get total volume of sell\n volume_sell = 0\n for i in range(current_sell_orders_length):\n volume_sell += sell_orders[i].left_quantity\n\n # compare volumes\n if volume_sell > volume_buy:\n sell_buy_diff = volume_sell - volume_buy - 1\n while sell_buy_diff > 0:\n sell_buy_diff -= sell_orders[current_sell_orders_length - 1].left_quantity\n current_sell_orders_length -= 1\n\n sum_of_weighted_orders = 0\n\n for i in range(current_sell_orders_length):\n sum_of_weighted_orders += buy_orders[i].left_quantity * (i + 1)\n\n # list of transactions, line is seller(i), row is buyer(j)\n trade_matrix = np.zeros(shape=(len(sell_orders), len(buy_orders)))\n\n # time pro rata algorithm\n p = []\n for i in range(current_buy_orders_length):\n p.append((buy_orders[i].left_quantity * buy_orders[i].price * (i + 1)) / sum_of_weighted_orders)\n\n P = []\n for i in range(current_buy_orders_length):\n comp = [buy_orders[i].left_quantity * buy_orders[i].price, np.floor(p[i] * current_sell_orders_length)]\n P.append(np.min(comp))\n\n for i in range(current_sell_orders_length):\n while sell_orders[i].left_quantity > 0:\n for j in range(current_buy_orders_length):\n if P[j] > 0:\n P[j] -= 1\n buy_orders[j].left_quantity -= 1\n sell_orders[i].left_quantity -= 1\n trade_matrix[[i], [j]] += 1\n if sell_orders[i].left_quantity == 0:\n break\n\n return trade_matrix", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def net_position(self):\n average_price = 0\n sum = 0\n for transaction in self.transactions:\n average_price += abs(transaction[0]/transaction[1])\n sum += transaction[1]\n\n average_price /= len(self.transactions) \n average_price *= sum\n \n return 
average_price", "def _compute_order_priorities_stats(self, orders):\n order_prices = {}\n tab_limits = {}\n tab_demands = {}\n total_fulfilled_prices = Counter()\n valid_statuses = set([Order.STATUS_OPEN, Order.STATUS_FULFILLED])\n\n for order in orders:\n if order.status not in valid_statuses:\n bodega_value_error(\n log,\n ('Order %s status %s is not valid for computing '\n 'price-based priority') % (order, order.status))\n\n order_price = 0.0\n if not order.maintenance:\n # We currently assume that each user has a single tab,\n # but this may change in the future.\n if order.tab.sid not in tab_limits:\n tab_limits[order.tab.sid] = order.tab.limit\n\n if order.tab.sid not in tab_demands:\n tab_demands[order.tab.sid] = 0.0\n\n # Compute order price as a sum of its items' prices.\n item_prices = \\\n self.item_tools.get_prices_for_items(order.items.items())\n order_price = sum(item_prices.values())\n\n if order.status == Order.STATUS_FULFILLED:\n total_fulfilled_prices[order.tab.id] += order_price\n\n tab_demands[order.tab.sid] += order_price\n\n log.debug('Order %s has a price of %s' % (order, order_price))\n order_prices[order.sid] = order_price\n\n total_tab_limit = sum(tab_limits.values())\n\n # Generate a list of tab_demands / tab_limit to compute the median\n # demand\n tab_demand_per_limit = sorted(\n [tab_demands[key] / tab_limits[key]\n for key in tab_demands])\n\n if total_tab_limit < 0:\n bodega_value_error(\n log,\n 'Total tab limit is negative: %s' % total_tab_limit)\n elif total_tab_limit == 0:\n if orders:\n bodega_value_error(\n log,\n ('Total tab limit is 0 for non-empty list of orders. '\n 'This may be due to a race condition in between the time '\n 'we collect the tab ids and fetch their limits.'))\n median_demand = None\n else:\n median_demand = statistics.median(tab_demand_per_limit)\n\n order_priority_stats = {\n 'median_demand': median_demand,\n 'order_prices': order_prices,\n 'tab_limits': tab_limits,\n 'total_fulfilled_prices': dict(total_fulfilled_prices)\n }\n\n log.debug('Order priority stats: %s' % order_priority_stats)\n return order_priority_stats", "def total_spent(self):\n\n approved_jobs = self.approved_jobs()\n expenses = self.expenses()\n\n total = 0\n for job in approved_jobs:\n total += job.total_paid\n\n for expense in expenses:\n total += expense.amount\n\n return float(round(total, 2))", "def test_order_cost_money(self):\n\t\tself.g.resolve_current_turn()\n\t\tself.assertEqual(self.reload(self.p).money, self.initial_money - BuyInfluenceOrder.BASE_COST)", "def maxProfit(self, prices):\n l = len(prices)\n if l <= 1:\n return 0\n dp = [0] * len(prices)\n r = prices[1] - prices[0]\n m = prices[0]\n for i in range(2, l):\n m = min(prices[i - 1], m)\n r = max(r, prices[i] - m)\n \n return r if r >= 0 else 0", "def calc_energy_and_price(self) -> (float, float):\n\n cost_sum = 0\n energy_sum = 0\n for pump_id in self.pumps:\n pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()\n cost_sum += pump_cost\n energy_sum += pump_energy\n\n pump_id.append_index = 0\n\n assert energy_sum >= 0, \"The pumping energy cant be negative!\"\n assert cost_sum >= 0, \"The pumping cost cant be negative!\"\n return energy_sum, cost_sum", "def final(order_dict):\r\n gap = (\"=\" * 50)\r\n overall_potential_cost = 0\r\n print(\"Thank you for using this program\")\r\n print(gap)\r\n print(\"Registrations of Interest in EV Subsidy Received:\")\r\n print(gap)\r\n \"\"\"\r\n for orders in order_dict:\r\n print(\"Order: {}\".format(orders))\r\n 
overall_potential_cost += order_dict[orders][\"Total Subsidy\"]\r\n for items in order_dict[orders]:\r\n if items == \"Cars on order\":\r\n occ_of_car = {}\r\n for car in order_dict[orders][\"Cars on order\"]:\r\n occ_of_car = order_dict[orders][\"Cars on order\"].count(car)\r\n print(\"{} x {} @ ${}\".format(occ_of_car, car, 200))\r\n else:\r\n print(\"{}: {}\".format(items, order_dict[orders][items]))\r\n \"\"\"\r\n for orders in order_dict:\r\n if order_dict[orders] != \"canceled\":\r\n print(\"Order: {}\".format(orders))\r\n overall_potential_cost += order_dict[orders][\"Total Subsidy\"]\r\n for items in order_dict[orders]:\r\n if items == \"Cars on order\":\r\n cars_dict = {}\r\n for car in order_dict[orders][\"Cars on order\"]:\r\n if car in cars_dict.keys():\r\n cars_dict[car] += 1\r\n else:\r\n cars_dict[car] = 1\r\n\r\n for car_name in cars_dict.keys():\r\n print(\"{} x {}\".format(car_name, cars_dict[car_name]))\r\n\r\n else:\r\n print(\"{}: {}\".format(items, order_dict[orders][items]))\r\n \r\n else:\r\n print(\"Order {} was canceled\".format(orders))\r\n print(gap)\r\n\r\n print(\"Total orders: {}\".format(len(order_dict)))\r\n print(\"Overall potential cost of these orders = {}\".format(overall_potential_cost))\r\n print(gap)\r\n return", "def sell_function(data_points, n_days):\n\n prediction_df = model_arima(data_points, n_days)\n\n # Calculates the profit for each day predicted taking into account running\n # costs and compares it with the highest profit so far.\n profit_array = [None] * (n_days + 1)\n\n for count in range(n_days):\n price = prediction_df.iloc[count][\"Prediction\"]\n profit_array[count + 1] = price\n\n return profit_array", "def update_profit(self):\n # Acessing Redis can cause greenlet switches because new jobs. We don't\n # want to potentially switch jobs multiple times quickly, so we update\n # the profitability information all at once after the loop to avoid\n # multiple network switches\n new_price_data = {}\n for manager in self.jobmanagers.itervalues():\n currency = manager.config['currency']\n pscore = self.redis.get(\"{}_profit\".format(currency))\n\n # Deserialize\n if pscore:\n try:\n pscore = simplejson.loads(pscore, use_decimal=True)\n except Exception:\n self.logger.warn(\n \"Error parsing profit score for {}! Setting it to 0..\"\n .format(currency))\n pscore = 0\n pass\n # If no score was grabbed, pass a 0 value score\n else:\n self.logger.warn(\"Unable to grab profit info for {}!\"\n .format(currency))\n pscore = 0\n\n ratio = self.redis.get(\"{}_ratio\".format(currency)) or 1.0\n ratio = float(ratio)\n\n # Only set updated if it actually changed\n if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:\n new_price_data[currency] = (pscore, ratio, time.time())\n\n # If we have some new information, adjust accordingly\n if new_price_data:\n self.logger.info(\"Updated price information for {}\"\n .format(new_price_data.keys()))\n # Atomic update in gevent\n self.price_data.update(new_price_data)\n\n # Update all the profit info. 
No preemption, just maths\n for currency in self.jobmanagers.iterkeys():\n self.update_profitability(currency)\n\n self.logger.debug(\n \"Re-checking best network after new price data for {}\"\n .format(new_price_data.keys()))\n self.check_best()", "def trend_price_up(self):\n raise NotImplementedError()", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? 
raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def total(proportions):\n final = {}\n for i in proportions:\n if i in running_total:\n final[i] = proportions[i] * running_total[i]\n print(final)\n else:\n final[i] = 0\n print(final)\n\n total_sum = sum(final.values())\n return total_sum", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def somme_encaissee(self) -> Numeric:\n return query_sum(\n self.offres().filter(paye=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def profitcal(dict1,dict2):\n for i in dict1:\n dict1[i].append('x')\n\n for i in dict2:\n dict2[i].append('x')\n\n for i in dict1:\n x=(dict1[i][1]-dict2[i][1])\n dict1[i][2]=-x\n dict2[i][2] = x\n return dict1,dict2", "def karma(self):\n total = (sum(oc.amount for oc in self.order_contributions if oc.is_external)\n - sum(o.external_contribution for o in self.own_orders))\n return total or Decimal('0.00')", "def get_sell_cost(self):\n return round(0.75 * self.sell_price[self.level - 1])", "def profitCalculation(confusion_matrix):\n numberofClasses = 4\n profits = [[20, -7, -7, -7], [-7, 15, -7, -7], [-7, -7, 5, -7], [-3, -3, -3, -3]]\n totalProfit = 0\n for count in range(numberofClasses):\n for counter in range(numberofClasses):\n totalProfit += confusion_matrix[count][counter] * profits[count][counter]\n\n return totalProfit", "def tot(self, prop=\"基金现值\", date=yesterdayobj()):\n res = 0\n for fund in self.fundtradeobj:\n res += fund.dailyreport(date).iloc[0][prop]\n return res", "def trading_cost(self) -> float:\n return self.__trading_cost", "def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. 
Money refunded.\")\n return False", "def best_promo(order: Order) -> Decimal:\n return max(promo(order) for promo in promos) # <3>", "def profit_curve(cost_benefit, predicted_probs, labels):\n n_obs = float(len(labels))\n # Make sure that 1 is going to be one of our thresholds\n maybe_one = [] if 1 in predicted_probs else [1] \n thresholds = maybe_one + sorted(predicted_probs, reverse=True)\n profits = []\n for threshold in thresholds:\n y_predict = predicted_probs >= threshold\n confusion_matrix = standard_confusion_matrix(labels, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit) / n_obs\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0", "def maxProfit(self, prices):\n np = 0\n p = float('-inf')\n cd = float('-inf')\n for price in prices:\n p, np, cd = max(np - price, p), max(np, cd), p + price\n return max(np, cd)", "def maxProfit(self, prices):\n # No way to make a profit without at least two days of history.\n if len(prices) < 2:\n return 0\n\n # Init S0 and S2 with the negative of the highest price. This is the\n # lowest possible drawdown with an optimal strategy.\n topPrice = max(prices)\n state = [- topPrice, 0, - topPrice, 0]\n\n for p in prices:\n nextState = list(state)\n\n # Either stay at this current state or buy at a lower price if\n # possible.\n nextState[0] = max(state[0], - p)\n\n # Stay at the current state or sell the stock we bought in S0 at a\n # higher price.\n nextState[1] = max(state[1], state[0] + p)\n\n # If we have a positive balance after completing the first\n # transaction, it makes sense to begin the second. 
There's no\n # reason to begin the second transaction if the first transaction\n # is in a losing position because we might be able to recoupe\n # losses by selling at a future price with may be higher.\n if state[1] > 0:\n nextState[2] = max(state[2], state[1] - p)\n\n # Stay at the current state for the second transaction or sell at a\n # higher price.\n nextState[3] = max(state[3], state[2] + p)\n\n state = nextState\n\n return max(state[1], state[3])", "def spread(self):\n if self.bid and self.offer:\n return self.offer[0].price - self.bid[-1].price\n\n return 0", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def find_max_profit(prices):\n profit = []\n for index, price in enumerate(prices):\n buy = prices[index]\n sell_list = prices[index + 1:]\n if sell_list != []:\n for sell_price in sell_list:\n profit.append(sell_price - buy)\n return sorted(profit)[-1]", "def other_opex_remaining(self) -> float:\n return (\n self.income_statement.opex.other_opex\n - self.depot_overhead_cost(self.operations.productivity.avg_num_trucks)\n - self.maintenance_cost(self.operations.productivity.avg_num_trucks)\n - self.fuel_cost(self.operations.productivity.avg_num_trucks)\n )", "def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4", "def get_buy_and_sell_costs(self, orders):\n\n buy_cost = sell_cost = 0\n for _, order in orders.items():\n if order[\"descr\"][\"type\"] == \"buy\":\n buy_cost += float(order[\"cost\"])\n else:\n sell_cost += float(order[\"cost\"])\n\n return buy_cost, sell_cost", "def BuyingPrice(self):\n return self.buying_rice" ]
[ "0.73407346", "0.70521134", "0.69925046", "0.66937786", "0.664156", "0.6609153", "0.660143", "0.64814395", "0.63270825", "0.62858367", "0.62036735", "0.6141628", "0.6069652", "0.6067097", "0.6044266", "0.60409504", "0.602659", "0.6025829", "0.6020886", "0.6019667", "0.6008011", "0.59903014", "0.59768826", "0.59436584", "0.5936405", "0.59282374", "0.5919609", "0.5914812", "0.5902267", "0.5887112", "0.5867797", "0.5851415", "0.5827295", "0.5822677", "0.5809203", "0.5793585", "0.5793585", "0.5792806", "0.57794917", "0.5774741", "0.5768972", "0.57682663", "0.5745518", "0.57272834", "0.57271063", "0.5724689", "0.57230705", "0.57217103", "0.5714856", "0.57148504", "0.56978303", "0.5694829", "0.56883466", "0.5687774", "0.567575", "0.5674203", "0.56729984", "0.56670064", "0.5666345", "0.56637573", "0.56540054", "0.5653704", "0.5650888", "0.56394255", "0.5634845", "0.5620298", "0.5611433", "0.5611144", "0.56076175", "0.5602636", "0.55989355", "0.55961525", "0.55934453", "0.5584759", "0.5582423", "0.55794", "0.5577665", "0.5570368", "0.5562549", "0.5561673", "0.5559925", "0.55598176", "0.555796", "0.5550567", "0.5550292", "0.55486584", "0.5544576", "0.55421257", "0.5541375", "0.5530754", "0.55280584", "0.55257946", "0.55173206", "0.5503644", "0.5496893", "0.5492295", "0.5490726", "0.54905474", "0.54777217", "0.5472367" ]
0.79486245
0
Try to close when TakeProfit or StopLoss hits.
def try_to_close(self, candle_high, candle_low, time, pre_candle_close=0.0, pre_time=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n\t\tself.applied = 0", "def _close(self):\n # TODO\n self.holding = False", "def handle_close(self):\n self.active = False\n self.close()", "def close(self):\n super(OpenCNTradeContext, self).close()", "def close(self):\n self._normal_close = True\n\n self.cancel()", "def close(self):\n # This is a NOOP by default", "def close(self):\n # This is a NOOP by default", "def close(self):\n\n return False", "def __exit__(self, *args):\n self.close()\n return False", "def close(self):\n # By default, this is a NOOP", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.exit()", "def do_close(self):\n self.cleanup(True)\n self.close()", "def _doCloseTool(self):\n self._cmdCloseTool()", "def close(self) -> bool:\n return True", "def eval_exit(self):\n if self.get_position_size() == 0:\n return\n\n unrealised_pnl = float(self.get_position()['unRealizedProfit'])\n\n # trail asset\n if self.get_exit_order()['trail_offset'] > 0 and self.get_trail_price() > 0:\n if self.get_position_size() > 0 and \\\n self.get_market_price() - self.get_exit_order()['trail_offset'] < self.get_trail_price():\n logger.info(f\"Loss cut by trailing stop: {self.get_exit_order()['trail_offset']}\")\n self.close_all()\n elif self.get_position_size() < 0 and \\\n self.get_market_price() + self.get_exit_order()['trail_offset'] > self.get_trail_price():\n logger.info(f\"Loss cut by trailing stop: {self.get_exit_order()['trail_offset']}\")\n self.close_all()\n\n #stop loss\n if unrealised_pnl < 0 and \\\n 0 < self.get_exit_order()['loss'] < abs(unrealised_pnl):\n logger.info(f\"Loss cut by stop loss: {self.get_exit_order()['loss']}\")\n self.close_all()\n\n # profit take\n if unrealised_pnl > 0 and \\\n 0 < self.get_exit_order()['profit'] < abs(unrealised_pnl):\n logger.info(f\"Take profit by stop profit: {self.get_exit_order()['profit']}\")\n self.close_all()", "def __exit__(self, *args):\n self.close()\n # propagate exception\n return False", "def close_and_exit(self):\n self.close()\n sys.exit(1)", "def close(self):\n\n if self._state == states['open']:\n self._do_close()", "def __exit__(self, exc_type, exc_value, traceback) -> bool:\n self.close()\n return False", "def _onCancel(self):\n\n self.close()", "def close(self) -> None:\r\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def _close(self):\n log.Debug('dpbx.close():')", "def close():\n sys.exit()", "def close(self): # from gym/core.py\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "async def _handle_stop_loss(self, trade: Dict[str, Any]) -> bool:\n\n pair = trade['pair']\n current_value = self.market.adjusted_close_values[pair][-1]\n\n if current_value < trade['cutoff_value']:\n stop_percent = config['trade_dynamic_stop_percent'] * trade['soft_stops']\n trade['stop_value'] *= (1.0 + stop_percent)\n if trade['stop_value'] > trade['check_value']:\n trade['stop_value'] = trade['check_value']\n\n elif current_value < trade['check_value']:\n trade['stop_value'] *= (1.0 + config['trade_dynamic_stop_percent'])\n if trade['stop_value'] > trade['check_value']:\n trade['stop_value'] = trade['check_value']\n\n if 
current_value <= trade['stop_value']:\n coro = self._trade_methods['sell'](trade, 'SOFT STOP SELL', 'soft_stop')\n utils.async_task(coro, loop=common.loop)\n self.trades[pair]['closed'] = []\n return True\n\n return False", "def close(self):\n self.solenoid.set(self.CLOSE)", "def _terminate(self) -> None:\n if not jh.should_execute_silently() or jh.is_debugging():\n logger.info(f\"Terminating {self.symbol}...\")\n\n self.before_terminate()\n\n self._detect_and_handle_entry_and_exit_modifications()\n\n # fake execution of market orders in backtest simulation\n if not jh.is_live():\n store.orders.execute_pending_market_orders()\n\n if jh.is_live():\n self.terminate()\n return\n\n if self.position.is_open:\n store.app.total_open_trades += 1\n store.app.total_open_pl += self.position.pnl\n logger.info(\n f\"Closed open {self.exchange}-{self.symbol} position at {self.position.current_price} with PNL: {round(self.position.pnl, 4)}({round(self.position.pnl_percentage, 2)}%) because we reached the end of the backtest session.\"\n )\n # first cancel all active orders so the balances would go back to the original state\n if self.exchange_type == 'spot':\n self.broker.cancel_all_orders()\n # fake a closing (market) order so that the calculations would be correct\n self.broker.reduce_position_at(self.position.qty, self.position.current_price, self.price)\n self.terminate()\n return\n\n if len(self.entry_orders):\n self._execute_cancel()\n logger.info('Canceled open-position orders because we reached the end of the backtest session.')\n\n self.terminate()", "def close(self) -> None:\n pass", "def close(self) -> None:\n pass", "def close(self) -> None:\n pass", "def end(self, won, reason):\n pass\n # replace with your end logic", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "async def close(self, gracefully: bool = False) -> None:", "async def _close(self):\n pass", "def close(self):\n \n self.__exit__(None, None, None)\n return", "def close(self):\n self.k2000.close()", "def stoploss(self):\n price = float(self.price)\n print(\"orderPrice1:\",self.dentry[\"orderPrice1\" + self.chartnumber])\n if (self.dentry[\"orderPrice1\" + self.chartnumber] - price) / self.dentry[\"orderPrice1\" + self.chartnumber] * 100 >= self.stopPercent:\n self.sell(stopped=True)", "def close(self):\n self._run_process_pending_rows = False", "def close (self):\n pass", "def _close_if_complete(self):\n if self.progress_var.get()>=100:\n # delete the variable trace (necessary?)\n #self.progress_var.trace_vdelete('w',self.progress_trace_name)\n\n self._close(final_message=\"Time %s: Finished %s\"%(self.sim.timestr(),\n self.timer.func.__name__))", "def Close(self):", "def stop(self):\n self.state = STATE_CLOSING\n self.join()\n self.state = STATE_CLOSED", "def 
close(self):\n self._close = True", "def close(self):\n if self.SE == 6:\n self.evr.polarity.put('VAL', 0)\n else:\n self.S_CLOSE = 1", "async def __aexit__(self, exc_type, exc_value, exc_tb):\n await self.close()\n # check for any exceptions\n if exc_type is not None:\n return False\n return True", "def close():", "def _basicClose(self):\n raise NotImplementedError()", "def _basicClose(self):\n raise NotImplementedError()", "def stop(self):\n self.close.set()", "def safe_close(self):\n if self.channel_is_closed:\n return\n if not self.channel_is_bad:\n try:\n self.safe_do_command(\"quit\")\n except BadGtpResponse, e:\n self.errors_seen.append(str(e))\n try:\n self.channel.close()\n except GtpTransportError, e:\n self.errors_seen.append(\"error closing %s:\\n%s\" % (self.name, e))\n self.channel_is_closed = True", "def close(self):\n return", "def close(self):\n return", "def close(self):\n return", "def close(self):\n self.closing = True", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()", "def close(self):\n if not self._close_state.is_set():\n self._close_state.set()" ]
[ "0.6352216", "0.6309689", "0.62376684", "0.617212", "0.6147022", "0.6096366", "0.6096366", "0.6072034", "0.6071069", "0.60654074", "0.6047621", "0.6024533", "0.6024533", "0.60132384", "0.6009106", "0.6005265", "0.6000903", "0.59958005", "0.5981424", "0.5960146", "0.595187", "0.5944617", "0.59244525", "0.59240144", "0.5921675", "0.5921675", "0.59133804", "0.59133804", "0.59133804", "0.59133804", "0.59133804", "0.59133804", "0.59133804", "0.59133804", "0.59097415", "0.5905961", "0.5901501", "0.5899094", "0.5899094", "0.5899094", "0.5894792", "0.58920527", "0.58871245", "0.58817875", "0.58817875", "0.58817875", "0.58810884", "0.58803993", "0.58803993", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5877075", "0.5873498", "0.5863206", "0.58563685", "0.58536226", "0.58500326", "0.5837529", "0.58364016", "0.5825692", "0.58130634", "0.5810204", "0.5808524", "0.5808387", "0.58062845", "0.5802058", "0.5796085", "0.5796085", "0.57957184", "0.5789433", "0.5781038", "0.5781038", "0.5781038", "0.5779216", "0.57774854", "0.5776894" ]
0.0
-1
Removes a service from a list of existing services.
def RemoveServiceFromEndpoints(service_name, services): new_services = [] if not isinstance(services, list): return new_services # TODO(user): Consider throwing an exception if the service is not # already configured in the list of endpoints. for service in services: if not isinstance(service, dict) or 'name' not in service: raise exceptions.ToolException(ValueError( 'Services are expected to be service dicts!')) if service['name'] != service_name: new_services.append(service) return new_services
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DeleteServices(self):\n for service in self.services.values():\n service.Delete()", "def delete_service(self, service):\n # type: (LoadBalancerService) -> List[BoundAction]\n return self._client.delete_service(self, service)", "def remove(self, service):\n os.remove(os.path.join(self.directory, service))", "def remove_service(self, zeroconf, service_type, name):", "def terminate_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service,\n raise_on=['terminated'])\n logger.info(\"Stopping and \"\n \"removing docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'], remove=True)\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": True}\n else:\n self._dirty_service[service][\"terminated\"] = True\n return services", "def delete_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover", "def service_delete(service):\n db = model.Session()\n service = _must_find(db, model.Service, service)\n db.delete(service)\n db.commit()\n\n\n # API Code #\n ############", "def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # prefix is explained in the service_create().\n service = 'tripleo_' + container\n\n sysd_unit_f = systemctl.format_name(service)\n sysd_health_f = systemctl.format_name(service + '_healthcheck')\n sysd_timer_f = service + '_healthcheck.timer'\n sysd_health_req_d = sysd_unit_f + '.requires'\n\n for sysd_f in sysd_unit_f, sysd_health_f, sysd_timer_f:\n if os.path.isfile(sysdir + sysd_f):\n log.debug('Stopping and disabling systemd service for %s' %\n service)\n try:\n systemctl.stop(sysd_f)\n systemctl.disable(sysd_f)\n except systemctl.SystemctlException:\n log.exception(\"systemctl failed\")\n raise\n log.debug('Removing systemd unit file %s' % sysd_f)\n os.remove(sysdir + sysd_f)\n else:\n log.info('No systemd unit file was found for %s' % sysd_f)\n\n # Now that the service is removed, we can remove its \".requires\"\n if os.path.exists(os.path.join(sysdir, sysd_health_req_d)):\n log.info('Removing healthcheck require for %s' % service)\n shutil.rmtree(os.path.join(sysdir, sysd_health_req_d))", "def delete_service(self, service_description, host_name):\n\t\tfor item in self.data['all_service']:\n\t\t\tif (item['service_description'] == service_description) and (host_name in self._get_active_hosts(item)):\n\t\t\t\tself.data['all_service'].remove(item)\n\t\t\t\titem['meta']['delete_me'] = True\n\t\t\t\titem['meta']['needs_commit'] = True\n\t\t\t\tself.data['all_service'].append(item)\n\n\t\t\t\treturn True", "def unproxy_service(self, *service_ids) -> None:\n\n for service_id in service_ids:\n router_key = self._router_key(self._router_id(service_id))\n middleware_key = self._middleware_key(self._middleware_id(service_id))\n tservice_key = self._tservice_key(self._tservice_id(service_id))\n\n self._zk.delete(router_key, recursive=True)\n self._zk.delete(middleware_key, recursive=True)\n self._zk.delete(tservice_key, recursive=True)\n\n # prevents \"KV connection error: middlewares cannot be a standalone element\"\n middlewares_key = f\"/{self._prefix}/http/middlewares\"\n if not self._zk.get_children(middlewares_key):\n self._zk.delete(middlewares_key)\n\n self._trigger_configuration_update()", "def unregister(self, service_name, service_addr, addr_cls=None):\n addr_cls = addr_cls or PlainAddress\n etcd_delete = True\n if addr_cls != 
PlainAddress:\n etcd_delete = False\n\n for service_name in service_name:\n key = self._form_service_key(service_name, service_addr)\n if etcd_delete:\n self._client.delete(key)\n else:\n self._client.put(addr_cls(service_addr).delete_value())\n\n self._services.get(service_addr, {}).discard(service_name)", "def service_remove(path, service_name):\n compose_result, err = __load_docker_compose(path)\n if err:\n return err\n services = compose_result[\"compose_content\"][\"services\"]\n if service_name not in services:\n return __standardize_result(\n False, \"Service {} did not exists\".format(service_name), None, None\n )\n del services[service_name]\n return __dump_compose_file(\n path,\n compose_result,\n \"Service {} is removed from {}\".format(service_name, path),\n already_existed=True,\n )", "def update_services(self, new_services_list):\n to_stop = [service for service in self if service not in new_services_list]\n for service_id in to_stop:\n self[service_id].stop()\n del self[service_id]\n\n for service_id in new_services_list:\n if service_id not in self:\n self[service_id] = ServiceManager(self.zk_client, self.project_id,\n service_id, self.callback)", "def delService(self):\n self.__selected.delete()\n row = self.currentRow()\n if row >= 1:\n self.__service_list.setCurrentRow(row - 1, QtCore.QItemSelectionModel.Select)\n self.refresh()", "def removeService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...", "def stop_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service, raise_on=['terminated'])\n logger.info(\"Stopping docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'])\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": False}\n\n # self.store.update_service_map()\n return services", "def remove_pilot_compute_service(self, pjs):\n self.pilot_job_services.remove(pjs)\n CoordinationAdaptor.update_cds(self.url, self)", "def delete_service(self, load_balancer, service):\n # type: (Union[LoadBalancer, BoundLoadBalancer], LoadBalancerService) -> List[BoundAction]\n data = {\n \"listen_port\": service.listen_port,\n }\n\n response = self._client.request(\n url=\"/load_balancers/{load_balancer_id}/actions/delete_service\".format(load_balancer_id=load_balancer.id),\n method=\"POST\", json=data)\n return BoundAction(self._client.actions, response['action'])", "def delete_service(self, service_id):\n service_name = self.fastly_cache[service_id]['service_name']\n del(self.fastly_cache[service_id])\n del(self.fastly_cache[service_name])\n\n return {'status': 'ok'}", "def unregister_service(self, name):\n self._services.remove(name)", "def remove_sp(self, date_limit):\n for provider in ServiceProvider.objects.filter(end_at__lt=date_limit, history=None):\n # Check for history versions\n for sp in ServiceProvider.objects.filter(history=provider.pk):\n self.output(\"Removing service provider (history): \" + sp.entity_id)\n if not self.list_only:\n sp.delete()\n self.output(\"Removing service provider: \" + provider.entity_id)\n if not self.list_only:\n provider.delete()", "def delete_service(self, project_id, service_id):\n service_obj = self.storage_controller.get_service(\n project_id, service_id)\n\n # get provider details for this service\n provider_details = self._get_provider_details(project_id, service_id)\n\n # change each provider detail's status to delete_in_progress\n for provider in 
service_obj.provider_details:\n service_obj.provider_details[provider].status = (\n u'delete_in_progress')\n\n self.storage_controller.update_service(\n project_id,\n service_id,\n service_obj\n )\n\n kwargs = {\n \"provider_details\": json.dumps(\n dict([(k, v.to_dict()) for k, v in provider_details.items()])),\n \"project_id\": project_id,\n \"service_id\": service_id,\n 'time_seconds': self.determine_sleep_times(),\n 'context_dict': context_utils.get_current().to_dict()\n }\n\n self.distributed_task_controller.submit_task(\n delete_service.delete_service, **kwargs)\n\n return", "def stop_services(self):\n logger.info(\"Stopping services: %s\", self.services)\n for service in self.services:\n with hide(*fab_quiet):\n sudo('service %s stop' % service)", "def delete_service_entry(service_name, service_type):\n manager = get_manager()\n service_id = manager.resolve_service_id(service_name, service_type)\n if service_id:\n manager.api.services.delete(service_id)\n log(\"Deleted service entry '%s'\" % service_name, level=DEBUG)", "async def remove_orphaned_services(\n registry: RedisResourceRegistry, app: web.Application\n) -> None:\n logger.info(\"Starting orphaned services removal...\")\n currently_opened_projects_node_ids = set()\n alive_keys, _ = await registry.get_all_resource_keys()\n for alive_key in alive_keys:\n resources = await registry.get_resources(alive_key)\n if \"project_id\" not in resources:\n continue\n\n project_uuid = resources[\"project_id\"]\n node_ids = await get_workbench_node_ids_from_project_uuid(app, project_uuid)\n currently_opened_projects_node_ids.update(node_ids)\n\n running_interactive_services = await get_running_interactive_services(app)\n logger.info(\n \"Will collect the following: %s\",\n [x[\"service_host\"] for x in running_interactive_services],\n )\n for interactive_service in running_interactive_services:\n # if not present in DB or not part of currently opened projects, can be removed\n node_id = interactive_service[\"service_uuid\"]\n if (\n not await is_node_id_present_in_any_project_workbench(app, node_id)\n or node_id not in currently_opened_projects_node_ids\n ):\n logger.info(\"Will remove service %s\", interactive_service[\"service_host\"])\n try:\n await stop_service(app, node_id)\n except (ServiceNotFoundError, DirectorException) as e:\n logger.warning(\"Error while stopping service: %s\", e)\n\n logger.info(\"Finished orphaned services removal\")", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def rm(path, service_names=None):\n\n project = __load_project(path)\n if isinstance(project, dict):\n return project\n else:\n try:\n project.remove_stopped(service_names)\n except Exception as inst: # pylint: disable=broad-except\n return __handle_except(inst)\n return __standardize_result(\n True, \"Removing stopped containers via docker-compose\", None, None\n )", "async def services_delete(request):\r\n LOG.debug('DELETE /services received.')\r\n # Tap into the database pool\r\n db_pool = request.app['pool']\r\n\r\n # Send request for processing\r\n await delete_services(request, db_pool)\r\n\r\n # Notify aggregators of changed service catalogue\r\n await invalidate_aggregator_caches(request, db_pool)\r\n\r\n # Return confirmation\r\n return web.HTTPNoContent()", "def delete_TestService(test_case, override_service_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], 
Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n service_name = override_service_name if override_service_name is not null else test_case.test_service_name\n services_info = TestSetup.get_RegisteredServicesList(test_case,\n override_headers=override_headers,\n override_cookies=override_cookies)\n test_service = list(filter(lambda r: r[\"service_name\"] == service_name, services_info))\n # delete as required, skip if non-existing\n if len(test_service) > 0:\n path = \"/services/{svc_name}\".format(svc_name=service_name)\n resp = test_request(app_or_url, \"DELETE\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n check_val_equal(resp.status_code, 200)\n TestSetup.check_NonExistingTestService(test_case, override_service_name=service_name)", "def filter_services(self, services):\n ret = []\n matchers = [re.compile(b) for b in self.service_blacklist_re]\n for s in services:\n if not any([m.match(s) for m in matchers]):\n ret.append(s)\n return set(ret)", "def delete_service(self, service_id):\r\n svc = self.client['Network_Application_Delivery_Controller_'\r\n 'LoadBalancer_Service']\r\n\r\n return svc.deleteObject(id=service_id)", "def stop_service(self,honeypotids,serviceids):\n req = {\"type\": \"stop_services\",\n \"services\": serviceids, \n \"to\": honeypotids, \n \"from\": self.network.mc_id}\n expect_dict = {\"type\":\"stopped_services\"}\n msg_list = self.send_receive(req, honeypotids, expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"services\"]\n return answer", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]) and \\\n not all([i in kwargs for i in ('proto', 'port')]):\n raise TypeError('Expected host or port/proto pair.')\n self.dbdel('service', kwargs)", "def stop(self):\n for service_id in self.keys():\n self[service_id].stop()\n del self[service_id]\n\n self._stopped = True", "def removeSDDCService(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n service_id = kwargs['objectname']\n response = delete_sddc_service_json(proxy, sessiontoken, service_id)\n if response == 200 :\n print(f'The group {service_id} has been deleted.')\n else :\n print(\"There was an error. 
Try again.\")\n sys.exit(1)", "def kill(self, services):\n list_services = []\n\n LOG.info(\"Receive raw info from Master: %s\" % services)\n for service in services:\n list_services.append(Service(**service))\n\n LOG.info(\"Receive format info from Master: %s\" %\n list_services)\n return self._killer.kill_services(list_services)", "def delete_collection_namespaced_service(namespace, label_selector=None):\n if label_selector is None:\n label_selector = labels_to_string({CLEANUP_LABEL: cleanup_policy})\n responses = []\n svcs = self.core_api.list_namespaced_service(namespace, label_selector=label_selector)\n for svc in svcs.items:\n responses.append(self.core_api.delete_namespaced_service(svc.metadata.name, namespace))\n return responses", "def delete(self, ws_id, project_id, service_id):\n service = servicesimpl.delete_service(project_id, service_id)\n return prepare_response(service)", "def remove(self, packages):\n raise NotImplementedError()", "def stop_service(os_faults_steps):\n stopped = []\n\n def _stop_service(nodes, service):\n cmd = \"service {} stop\".format(service)\n os_faults_steps.execute_cmd(nodes, cmd)\n stopped.append([nodes, service])\n\n yield _stop_service\n\n for nodes, service in reversed(stopped):\n cmd = \"service {} start\".format(service)\n os_faults_steps.execute_cmd(nodes, cmd)", "def remove_service(project, env_spec_name, variable_name, prepare_result=None):\n failed = _check_problems(project)\n\n if failed is not None:\n return failed\n\n requirements = [\n req for req in project.find_requirements(env_spec_name, klass=ServiceRequirement)\n if req.service_type == variable_name or req.env_var == variable_name\n ]\n if not requirements:\n return SimpleStatus(success=False,\n description=\"Service '{}' not found in the project file.\".format(variable_name))\n\n if len(requirements) > 1:\n return SimpleStatus(success=False,\n description=(\"Conflicting results, found {} matches, use list-services\"\n \" to identify which service you want to remove\").format(len(requirements)))\n\n if prepare_result is None:\n prepare_result = prepare.prepare_without_interaction(project,\n provide_whitelist=(requirements[0], ),\n env_spec_name=env_spec_name,\n mode=provide.PROVIDE_MODE_CHECK)\n\n assert env_spec_name is None or prepare_result.env_spec_name == env_spec_name\n\n env_var = requirements[0].env_var\n\n status = prepare.unprepare(project, prepare_result, whitelist=[env_var])\n if not status:\n return status\n\n project.project_file.unset_value(_path_to_service(env_spec_name, env_var))\n project.project_file.use_changes_without_saving()\n assert project.problems == []\n\n project.project_file.save()\n return SimpleStatus(success=True, description=\"Removed service '{}' from the project file.\".format(variable_name))", "def remove_cloud_resources_linked_to_farm(farm: Farm):\n linked_services = IMPL.farm.get_settings(farm.id)['farm']['services']\n LOG.info(f\"Linked to farm [{farm.id}] cloud services: {linked_services}\")\n for service in linked_services:\n method = getattr(lib_resources, f\"delete_{service['type']}\", None)\n if method:\n LOG.info(f\"Remove {service['type']} service {service['cloudObjectId']} from {service['platform']} cloud\")\n IMPL.farm.unlink_cloud_service(farm.id, service['cloudObjectId'])\n method(\n cloud_id=service['cloudObjectId'],\n cloud_location=service['cloudLocation'],\n cloud_name=service['name']\n )", "def delete_vpnservice(self, vpnservice):\r\n return self.delete(self.vpnservice_path % (vpnservice))", "def unregister(self, service_name, 
service_addr):\n raise NotImplementedError", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "def get_services_list(self, services):\n if not services:\n return []\n\n return [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]", "async def api_delete_service(service_id, g: WalletTypeInfo = Depends(get_key_type)):\n service = await get_service(service_id)\n if not service:\n raise HTTPException(\n status_code=HTTPStatus.NOT_FOUND, detail=\"No service with this ID!\"\n )\n if service.wallet != g.wallet.id:\n raise HTTPException(\n status_code=HTTPStatus.FORBIDDEN,\n detail=\"Not authorized to delete this service!\",\n )\n await delete_service(service_id)\n return \"\", HTTPStatus.NO_CONTENT", "def remove(self, *names):\n for name in names:\n self._storage.pop(name, None)", "def stop(self, instance_name=\"\", capture_output=True,\n update_service_list=True):\n if not update_service_list:\n return\n svc_list = []\n try:\n with open(paths.SVC_LIST_FILE, 'r') as f:\n svc_list = json.load(f)\n except Exception:\n # not fatal, may be the first service\n pass\n\n while self.service_name in svc_list:\n svc_list.remove(self.service_name)\n\n with open(paths.SVC_LIST_FILE, 'w') as f:\n json.dump(svc_list, f)\n\n return", "def remove(self, packages):\n if packages:\n cmd = ['dnf', 'remove'] + list(packages)\n subprocess.Popen(cmd).wait()", "def remove(*, item : Any, list : Union[List[Any], ConduitVariable]) -> None:\n list.remove(item)", "def delete_listener(self, service, bigips):\n vip = self.service_adapter.get_virtual_name(service)\n tls = self.service_adapter.get_tls(service)\n if tls:\n tls['name'] = vip['name']\n tls['partition'] = vip['partition']\n error = None\n for bigip in bigips:\n self.vs_helper.delete(bigip,\n name=vip[\"name\"],\n partition=vip[\"partition\"])\n\n # delete ssl profiles\n # Don't stop processing in case of errors. 
Otherwise the other F5's might have a different configuration\n try:\n self.remove_ssl_profiles(tls, bigip)\n except Exception as err:\n LOG.error(\"Error adding SSL Profile to listener: {0}\".format(err))\n error = err if error is None else error\n\n if error:\n raise error", "def unpublishAllServices(self):\n for k in self.published.keys():\n self.unpublishService(k)", "def deleteService(self, sid, uid):\n dao = ServiceDAO()\n service = dao.deleteService(sid, uid=uid)\n if service is not None:\n return jsonify(_buildCoreServiceResponse(service))\n return jsonify(Error=\"No service with that ID\"), 404", "def delete_list(self, name: str) -> None:\n key = self.__to_key(name)\n if key in self.lists:\n del self.lists[key]\n else:\n raise ListNotFoundException(name)", "def remove(self, name):\n for i in range(len(self.servers)):\n if name == self.servers[i].name:\n del self.servers[i]\n self.save()\n return True\n raise ValueError('Server not found: ' + name)", "def _resolve_services(self, services=None):\n\n compose_config = self._load_compose_config()\n all_services = [c['name'] for c in compose_config.services]\n if services is None:\n return all_services\n\n # Resolve * regexp based service names\n for service in set(services):\n if '*' in service:\n reg = re.compile(service)\n services.extend(\n [m for m in all_services if reg.match(m)])\n services.remove(service)\n\n # Remove unexisting services\n return set(services) & set(all_services)", "def removeFromWatchlist(self, items):\n if not isinstance(items, list):\n items = [items]\n\n for item in items:\n if not self.onWatchlist(item):\n raise BadRequest(f'\"{item.title}\" is not on the watchlist')\n ratingKey = item.guid.rsplit('/', 1)[-1]\n self.query(f'{self.METADATA}/actions/removeFromWatchlist?ratingKey={ratingKey}', method=self._session.put)\n return self", "def remove(self, *domains):\n for domain in domains:\n self._remove_sequence(domain)", "def delete_node_service(self, zabbix_name):\n serviceid = self._zabbix_get_serviceid(zabbix_name)\n child_serviceids = self._zabbix_get_children_serviceids(serviceid)\n\n # First delete all children\n for i in child_serviceids:\n try:\n self.zapi.service.delete([i])\n except ZabbixAPIException as e:\n logger.exception(e)\n\n # Finally delete the node IT service\n try:\n res = self.zapi.service.delete([serviceid])\n except ZabbixAPIException as e:\n logger.exception(e)\n raise InternalMonitoringError(e)\n\n return parse_zabbix_result(res, 'serviceids', from_get_request=False)", "def test_ipam_services_delete(self):\n pass", "def remove(self, *args):\n return _libsbml.ListOfPorts_remove(self, *args)", "def remove_users(self, *users):\r\n pass", "def stop_service(hosts, remove_db=True):\n username = config.get_param('ENDPOINT_USERNAME')\n password = config.get_param('ENDPOINT_PASSWORD')\n args = [(host, (host, username, password, remove_db), {})\n for host in hosts]\n\n ThreadPool(cleanup_node, args)", "def remove_stopwords(self, *args):\n if self.remove_stopwords is False:\n raise Exception(\"Error - enable stopword removal functionality\")\n if type(args) != list:\n raise Exception(\"Error - expected a list\")\n if args == []:\n raise Exception(\"Error - no items to remove from stopword list\")\n for arg in args:\n if arg in self.stopword_list:\n self.stopword_list.remove(arg)\n else:\n raise Exception(arg+\" not in list\")", "def remove_all(self, *items):\n for item in items:\n self.remove(item)", "def stop_services(services: Services):\n app_root_logger.info(\"Start services stop hierarchy 
creation\")\n # get estimated order\n stop_flow: List = stop_estimated_order(services)\n stop_flow_str: str = print_flow_order(stop_flow)\n app_root_logger.info(f\"Services estimated stopping order will be:\\n{stop_flow_str}\")\n\n # example of real order\n stop_flow: List = stop_order(services)\n stop_flow_str: str = print_flow_order(stop_flow)\n app_root_logger.info(f\"Services 'real' stopping order will be:\\n{stop_flow_str}\")", "def delete_services(\n self,\n print_service_id, # type: str\n if_match=None, # type: Optional[str]\n **kwargs # type: Any\n ):\n # type: (...) -> None\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.delete_services.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printService-id': self._serialize.url(\"print_service_id\", print_service_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n if if_match is not None:\n header_parameters['If-Match'] = self._serialize.header(\"if_match\", if_match, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def paths_revoke_service(ctx, network, destination, source, port):\n source_service = get_service_for_cli(ctx, network, source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.remove(source_service, destination_service, port)\n click.echo('Removed path from %s to %s in network %s for port %s' % (source, destination,\n network, port))", "def removeServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...", "def services(self, services):\n\n self._services = services", "def services(self, services):\n\n self._services = services", "def _unregister_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_unregister_service_description()\n oef_search_dialogues = cast(\n OefSearchDialogues, self.context.oef_search_dialogues\n )\n oef_search_msg, _ = oef_search_dialogues.create(\n counterparty=self.context.search_service_address,\n performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,\n service_description=description,\n )\n self.context.outbox.put_message(message=oef_search_msg)\n self.context.logger.info(\"unregistering service from SOEF.\")", "def get_service_list(service_only: str,\n exclude_services: str,\n service_directory: struct_time) -> List[str]:\n\n if not os.path.isdir(service_directory):\n raise SqPollerConfError(\n 'The service directory provided is not a directory'\n )\n\n svcs = list(Path(service_directory).glob('*.yml'))\n allsvcs = [s_name for s in svcs\n if (s_name := os.path.basename(s).split('.')[0])\n 
not in BLACKLIST_SERVICES]\n svclist = None\n\n if service_only:\n svclist = service_only.split()\n\n # Check if all the given services are valid\n notvalid = [s for s in svclist if s not in allsvcs]\n if notvalid:\n raise SqPollerConfError(f'Invalid svcs specified: {notvalid}. '\n f'Should have been one of {allsvcs}')\n else:\n svclist = allsvcs\n\n if exclude_services:\n excluded_services = exclude_services.split()\n # Check if all the excluded services are valid\n notvalid = [e for e in excluded_services if e not in allsvcs]\n if notvalid:\n raise SqPollerConfError(f'Services {notvalid} excluded, but '\n 'they are not valid.')\n svclist = list(filter(lambda x: x not in excluded_services,\n svclist))\n\n if not svclist:\n raise SqPollerConfError('The list of services to execute is empty')\n return svclist", "def remove(self, key):\r\n for i in range(len(self.lis)):\r\n if self.lis[i][0] == key:\r\n self.lis.pop(i)\r\n break", "def remove_all(self, host_names, raise_on_not_found=True):\n for host_name in host_names:\n self.remove_one(host_name, raise_on_not_found)", "def delete_stop_words(list, wordset):\n for word in wordset:\n list.delete(word)", "def remove_credentials(service: str) -> None:\n\n # SQL query to remove the user servise credentials from the database\n query = f\"DELETE FROM {service}_credentials WHERE user_id=?;\"\n\n # Execute the query\n with connect(DATABASE) as db:\n db.execute(query, (session[\"user_id\"],))\n db.commit()", "def remove(self, *args):\n return _libsbml.ListOf_remove(self, *args)", "def service(self) -> Generator[dict, None, None]:\n services_api: ServicesAPI = ServicesAPI()\n service: dict = ServiceFactory.create(recurring_ride_service=True)\n\n yield service\n\n services_api.delete_service(service)", "def remove(self, ID):\n i = 0\n for i in range(0, len(self.__lst)):\n if self.__lst[i].getId() == ID:\n self.__lst.pop(i)\n return\n raise ValueError(\"Nu exista disciplina\")", "async def async_will_remove_from_hass(self):\n await super().async_will_remove_from_hass()\n for service in self._device.device_services:\n service.unsubscribe_callback(self.entity_id)", "def remove_from_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='remove')", "def remove_games(self, games):\n for game in games:\n response = self._session.get(self._remove_game_url,\n params={\n 'type': 'game',\n 'id': game.game_id\n })\n response.raise_for_status()", "def list_services(ctx):\n pass", "def delete_pool(self, service, bigips):\n loadbalancer = service.get('loadbalancer')\n pool = self.service_adapter.get_pool(service)\n members = service.get('members', list())\n\n error = None\n for bigip in bigips:\n try:\n self.pool_helper.delete(bigip, name=pool[\"name\"],\n partition=pool[\"partition\"])\n except HTTPError as err:\n if err.response.status_code != 404:\n error = f5_ex.PoolDeleteException(err.message)\n LOG.error(\"Failed to remove pool %s from %s: %s\",\n pool['name'], bigip, error.message)\n except Exception as err:\n error = f5_ex.PoolDeleteException(err.message)\n LOG.error(\"Failed to remove pool %s from %s: %s\",\n pool['name'], bigip, error.message)\n\n for member in members:\n self._delete_member_node(loadbalancer, member, bigip)\n\n return error", "def service_ids(self, service_ids):\n\n self._service_ids = service_ids", "def remove_deployed_services (cls, nffg, log=logging.getLogger(\"CLEAN\")):\n for infra in nffg.infras:\n log.debug(\"Remove deployed elements from Infra: %s\" % infra.id)\n del_ports = []\n del_nfs = []\n for src, 
dst, link in nffg.network.out_edges_iter(data=True):\n if link.type == NFFG.TYPE_LINK_DYNAMIC and \\\n link.dst.node.type == NFFG.TYPE_NF:\n del_nfs.append(dst)\n del_ports.append(link.src.id)\n if del_nfs:\n nffg.network.remove_nodes_from(del_nfs)\n log.debug(\"Removed NFs: %s\" % del_nfs)\n if del_ports:\n for id in del_ports:\n infra.del_port(id)\n log.debug(\"Removed dynamic ports: %s\" % del_ports)\n log.debug(\"Clear flowrules...\")\n for port in infra.ports:\n port.clear_flowrules()\n\n return nffg", "def list_services(service='http://arcgis.inei.gob.pe:6080/arcgis/rest/services'):\n all_services = []\n r = _post(service)\n for s in r['services']:\n all_services.append('/'.join([service, s['name'], s['type']]))\n for s in r['folders']:\n new = '/'.join([service, s])\n endpt = _post(new)\n for serv in endpt['services']:\n all_services.append('/'.join([service, serv['name'], serv['type']]))\n return all_services", "def remove(self,s):\n \n p1, p2 = self.persons\n \n p1.remove_partner(p2,s)\n p2.remove_partner(p1,s)", "async def async_unsubscribe_services(self) -> None:\n # Delete list of subscriptions and cancel renewal before unsubcribing\n # to avoid unsub-resub race.\n sids = list(self._subscriptions)\n self._subscriptions.clear()\n await self._update_resubscriber_task()\n\n await asyncio.gather(*(self._async_unsubscribe_service(sid) for sid in sids))", "def add_services(self):\n # first get the names\n names = str(self.client.console_execute('services -c name {0}\\n'.format(self.ip))[b'data'])\n while not 'name' in names:\n sleep(10)\n names = self.client.console_read()\n names = names.split('\\n')\n for row in names:\n if self.ip in row:\n row = strip_whitespaces(row)\n self.services.append({'name': row.split(' ')[1]})\n\n # get the ports by service name\n ports = str(self.client.console_execute('services -c port {0}\\n'.format(self.ip))[b'data'])\n while not 'port' in ports:\n sleep(10)\n ports = self.client.console_read()\n ports = ports.split('\\n')\n for row in ports:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['port'] = row.split(' ')[1]\n\n # get some information by service name (only useful if a report shall be generated)\n info = str(self.client.console_execute('services -c info {0}\\n'.format(self.ip))[b'data'])\n while not 'info' in info:\n sleep(10)\n info = self.client.console_read()\n info = info.split('\\n')\n for row in info:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['info'] = row.split(' ')[1]", "def remove_instances(path, instance_list):\n path = path.lower().replace(\" \", \"-\")\n problem_instances = get_all_problem_instances(path)\n deployment_json_dir = join(DEPLOYED_ROOT, path)\n\n for instance in problem_instances:\n instance_number = instance[\"instance_number\"]\n if instance[\"instance_number\"] in instance_list:\n logger.debug(\"Removing instance {} of '{}'.\".format(\n instance_number, path))\n\n directory = instance[\"deployment_directory\"]\n user = instance[\"user\"]\n service = instance[\"service\"]\n socket = instance[\"socket\"]\n deployment_json_path = join(deployment_json_dir,\n \"{}.json\".format(instance_number))\n\n if service != None:\n logger.debug(\"...Removing xinetd service '%s'.\", service)\n os.remove(join(XINETD_SERVICE_PATH, service))\n\n logger.debug(\"...Removing deployment directory '%s'.\", directory)\n shutil.rmtree(directory)\n os.remove(deployment_json_path)\n\n logger.debug(\"...Removing problem user '%s'.\", 
user)\n execute([\"userdel\", user])\n\n if problem_instances:\n execute([\"service\", \"xinetd\", \"restart\"], timeout=60)", "def delete_service(self, apikey, resource):\n url = '{}/iot/services/?apikey={}&resource={}'.format(self.url, apikey, resource)\n r = requests.delete(url, headers=self.headers)\n return r", "def remove(self, urls):\n path = \"authSettings/exemptedUrls?action=REMOVE_FROM_LIST\"\n return self._session.post(path, urls)", "def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)", "def stop_service(self):\n\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Tearing down service\")\n\n try:\n self.dkr_service.remove()\n except:\n logging.warning(\"Failed to stop service {}\".format(self.dkr_name))\n pass", "def command_rm(self, system_id, *system_ids):\n # Intentionally reading the first system_id separately,\n # because it's required. The others are optional.\n # This ensures that we'll generate an error if someone tries to call\n # this without the required argument.\n system_ids = (system_id,) + system_ids\n has_failed = False\n for system_id in system_ids:\n try:\n system = SystemModel.create_by_id(system_id, self.environment)\n controller = SystemControllerModel(system, self.environment)\n controller.unmount()\n system.delete(self.environment)\n except SftpException as e:\n sys.stderr.write('Cannot remove %s: %s\\n' % (system_id, str(e)))\n has_failed = True\n if has_failed:\n sys.exit(1)", "def remove_files(files):\n for file_name in files:\n os.remove(file_name)", "def delete_service_final(self, row):\n service = self.services_table[row]\n\n sure = self.yes_no_dialog(\"Are you sure you want to delete Service '{}'?\".format(service.title))\n\n if not sure:\n return\n\n self.backend.service_delete(service.id)\n self.refresh_services()" ]
[ "0.7037286", "0.675924", "0.6610748", "0.6590831", "0.62848717", "0.6255595", "0.61463314", "0.6130574", "0.611379", "0.6080032", "0.6072836", "0.6069888", "0.60036", "0.5951796", "0.5946897", "0.59276074", "0.59024787", "0.58957607", "0.58896816", "0.5826269", "0.57981753", "0.5736189", "0.57276124", "0.56832755", "0.56461155", "0.56343794", "0.561863", "0.5609318", "0.5520186", "0.54270095", "0.54173523", "0.5417318", "0.5416733", "0.5398495", "0.53685963", "0.5365034", "0.5353757", "0.5338416", "0.5337832", "0.5332726", "0.53232884", "0.531986", "0.53026015", "0.52750826", "0.52630585", "0.5259642", "0.52571505", "0.5238517", "0.5218924", "0.5216392", "0.5213432", "0.5211593", "0.5187772", "0.5179352", "0.51704973", "0.51667625", "0.5161416", "0.51560247", "0.51426256", "0.5135176", "0.51137114", "0.5111181", "0.51049817", "0.50974584", "0.50831324", "0.5075591", "0.5075186", "0.50720865", "0.5068054", "0.5064795", "0.50644124", "0.50644124", "0.506334", "0.50605965", "0.5054261", "0.5043163", "0.5038815", "0.50382173", "0.5037159", "0.5037062", "0.50364053", "0.5034613", "0.5033753", "0.5025406", "0.50202745", "0.5007112", "0.4997561", "0.4990593", "0.49827054", "0.49765393", "0.49584636", "0.49554697", "0.4950943", "0.49404895", "0.49384335", "0.49383727", "0.49369475", "0.49364758", "0.4923179", "0.49077958" ]
0.7142464
0
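The RemoveServiceFromEndpoints document in the record above filters a list of service dicts by their 'name' key and raises on malformed entries. A minimal standalone sketch of the same logic, assuming a plain ValueError stands in for gcloud's exceptions.ToolException (the snake_case name is mine, not from the source):

def remove_service_from_endpoints(service_name, services):
    """Return a copy of `services` without the entry named `service_name`."""
    new_services = []
    if not isinstance(services, list):
        return new_services  # non-list input yields an empty list, as in the record
    for service in services:
        if not isinstance(service, dict) or 'name' not in service:
            raise ValueError('Services are expected to be service dicts!')
        if service['name'] != service_name:
            new_services.append(service)
    return new_services

services = [{'name': 'web'}, {'name': 'worker'}]
assert remove_service_from_endpoints('worker', services) == [{'name': 'web'}]
assert remove_service_from_endpoints('db', None) == []  # non-list input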
Return the distance between two keys on a QWERTY keyboard, using either Manhattan or Euclidean distance.
def key_distance(self, x, y, type="manhattan"): if type == "manhattan": return self.manhattan_dist_matrix[self.keys.index(x), self.keys.index(y)] elif type == "euclidean": return self.euclidean_dist_matrix[self.keys.index(x), self.keys.index(y)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(self, keyOne, keyTwo):", "def qwerty_distance():\n from collections import defaultdict\n import math\n R = defaultdict(dict)\n R['-']['-'] = 0\n zones = [\"dfghjk\", \"ertyuislcvbnm\", \"qwazxpo\"]\n keyboard = [\"qwertyuiop\", \"asdfghjkl\", \"zxcvbnm\"]\n for num, content in enumerate(zones):\n for char in content:\n R['-'][char] = num + 1\n R[char]['-'] = 3 - num\n for a in ascii_lowercase:\n rowA = None\n posA = None\n for num, content in enumerate(keyboard):\n if a in content:\n rowA = num\n posA = content.index(a)\n for b in ascii_lowercase:\n for rowB, contentB in enumerate(keyboard):\n if b in contentB:\n R[a][b] = int(math.fabs(rowB - rowA) + math.fabs(posA - contentB.index(b)))\n return R", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def distance(a, b):\n a = a[0]\n b = b[0]\n if lower.search(a):\n if lower.search(b):\n return abs(ord(b) - ord(a)) % 8\n elif upper.search(b):\n return abs(ord(b.lower()) - ord(a)) % 5 + 8\n elif upper.search(a):\n if lower.search(b):\n return abs(ord(a.lower()) - ord(b)) % 5 + 8\n elif upper.search(b):\n return abs(ord(b) - ord(a)) % 8\n if a == b:\n return 0\n return 1", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def dist(string1, string2):\n if string1 == string2:\n return 0\n count1 = Counter(string1)\n count2 = Counter(string2)\n\n keys = set(count1.keys())\n keys.update(count2.keys())\n dist = sum(abs(count1.get(letter, 0) - count2.get(letter, 0)) for letter in keys)\n return dist", "def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5", "def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def manhattan(rating1, rating2):\r\n distance = 0\r\n commonRatings = False \r\n for key in rating1:\r\n if key in rating2:\r\n distance += abs(rating1[key] - rating2[key])\r\n commonRatings = True\r\n if commonRatings:\r\n return distance\r\n else:\r\n return -1 #Indicates no ratings in common\r", "def distance(self, word_a, word_b):\n word_a, word_b = word_a.upper(), word_b.upper()\n s_a = self.word_lookup[word_a]\n s_b = self.word_lookup[word_b]\n j = 1\n max_len = min(len(s_a), len(s_b))\n while j <= max_len:\n if s_a[-j] != s_b[-j]:\n break\n j += 1\n return j", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def distance(p_1, p_2):\n return ((p_2[0] - p_1[0]) ** 2 + (p_2[1] - p_1[1]) ** 2 \\\n + (p_2[2] - p_1[2]) ** 2) ** 0.5", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def distance(self, wn1, wn2):\n return abs(self.chunk_map[wn1] - self.chunk_map[wn2])", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + 
s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))", "def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])", "def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)", "def distance(P1, P2):\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5", "def CalculateDistance(q1, q2):\r\n return np.sqrt((q1[0] - q2[0])**2 + (q1[1] - q2[1])**2)", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)", "def edit_distance (str1, str2):\n str1.strip()\n str2.strip()\n if len(str1) != len(str2):\n raise ValueError(\"Strings have to be of equal lengths: \" + str1 + \" and \" + str2)\n\n return sum(bit=='1' for bit in bin(int(binascii.hexlify(xorstr(str1, str2)), 16)))", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)", "def edit_distance(str1, str2):\r\n pass", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def calculate_distance(coords1, coords2):# 3 sets of double quotes allows to give a description for help\n distance_x = coords1[0] - coords2[0]\n distance_y = coords1[1] - coords2[1]\n distance_z = coords1[2] - coords2[2]\n distance = numpy.sqrt(distance_x**2 + distance_y**2 + distance_z**2)\n return distance", "def getDistance(self,p1,p2):\n return sum([(p1[i]-p2[i])**2 for i in range(2)])", "def manhattan_dist(c1, c2):\n return abs(c1[0] - c2[0]) + abs(c1[1] - c2[1]) + abs(c1[2] - c2[2])", "def distance_metric(actions1, actions2):\n diff = actions1-actions2\n mean_diff = np.mean(np.square(diff), axis=0)\n dist = sqrt(np.mean(mean_diff))\n return dist", "def distance():\n return str(us.get_distance())", "def get_distance(qa_dict, qb_dict):\r\n d=0\r\n j_names=qa_dict.keys()\r\n if len(j_names)==0:\r\n rospy.loginfo(\"Length is 0\")\r\n return 0\r\n for jn in j_names:\r\n d+=abs(qb_dict[jn]-qa_dict[jn])\r\n d/=len(j_names)\r\n return d", "def gram_edit_distance(self, gram1, gram2):\r\n distance = 0.0\r\n if gram1 == gram2:\r\n distance = 1.0\r\n return distance", "def distance(dna_1, dna_2):\n hamming_difference = 0\n\n # turn the strings into a list of characters\n a = list(dna_1)\n b = list(dna_2)\n\n # iterate through both lists and compares. 
If they are not the same increment the hamming_difference\n for nucleotide_1, nucleotide_2 in map(None, dna_1, dna_2):\n if nucleotide_1 != nucleotide_2:\n hamming_difference += 1\n\n return hamming_difference", "def distance(self, x, y, keyboard_weight=None):\r\n dist_matrix = self.distance_matrix(x, y, keyboard_weight)\r\n return dist_matrix[-1, -1]", "def edit_distance(str_1, str_2):\n return edit_distance_dp(str_1, len(str_1), str_2, len(str_2))", "def kc_distance(self, other, lambda_=0.0):\n return self._ll_tree.get_kc_distance(other._ll_tree, lambda_)", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def hamming_distance(string1: str, string2: str) -> int:\n if len(string1) != len(string2):\n raise ValueError(\"String lengths must match!\")\n\n count = 0\n\n for char1, char2 in zip(string1, string2):\n if char1 != char2:\n count += 1\n\n return count", "def gram_edit_distance(self, gram1, gram2):\n distance = 0.0\n if gram1 == gram2:\n distance = 1.0\n return distance", "def distance(A, B):\n return abs(A - B)", "def distance(A, B):\n return abs(A - B)", "def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))", "def get_wmd_dist(s1, s2, model):\r\n s1 = s1.lower().strip().split()\r\n s2 = s2.lower().strip().split()\r\n\r\n distance = model.wmdistance(s1, s2)\r\n return distance", "def generalised_hamming_distance(a, b):\n if len(a) == len(b):\n return hamming_distance(a, b)\n if len(a) > len(b):\n dna = a\n kmer = b\n else:\n dna = b\n kmer = a\n k = len(kmer)\n\n dist = min([hamming_distance(kmer, kmer2) for kmer2 in kmers_from_dna(dna, k)])\n return dist", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def distance(p1, p2):\n return math.hypot(p2[0] - p1[0], p2[1] - p1[1])", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def get_distance_2(pos1, pos2):\n\treturn (pos2[0] - pos1[0])**2 + (pos2[1] - pos1[1])**2", "def hamming_distance(input1, input2):\n if len(input1) != len(input2):\n raise ValueError('Length of input1 and input2 are not equal.')\n input1 = hex_decode(hex_encode(input1))\n input2 = hex_decode(hex_encode(input2))\n # the general strategy here is to xor the two strings together\n # and then just count the number of 1s in the output (i.e., where the\n # two strings differed).\n output = fixed_xor(input1, input2)\n distance = 0\n for byte in output:\n for i in range(8):\n bit_mask = 1 << i\n if (bit_mask & byte) == bit_mask:\n distance += 1\n return distance", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)", "def distance(p1, p2):\n dist = 0\n for k in set([*p1.keys(), *p2.keys()]):\n dist += (p1.get(k, 0) - p2.get(k, 0))**2\n return math.sqrt(dist)", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))", "def distance(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)", "def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - 
float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d", "def distance(self, a, b):\n raise NotImplementedError()", "def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)", "def trame_distance(t1, t2):\n return np.linalg.norm(t1 - t2)", "def euclidian_distance(stroke1, stroke2):\n\n x1 = np.array(stroke1.x)\n x2 = np.array(stroke2.x)\n y1 = np.array(stroke1.y)\n y2 = np.array(stroke2.y)\n\n d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n m = d - np.min(d)\n if np.mean(m) < 0:\n return 0, 0\n else:\n return np.mean(d), np.mean(m)", "def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)", "def manhattan_distance(x, y):\n return sum(abs(a - b) for a, b in zip(x, y))", "def kmer_distance(seq1,seq2,k=3):\n seq1_set = set(count_kmers(seq1,k).keys())\n seq2_set = set(count_kmers(seq2,k).keys())\n union_seq = seq1_set.union(seq2_set)\n dissimilarity = seq1_set ^ seq2_set\n distance = len(dissimilarity)/len(union_seq)\n print(dissimilarity)\n return distance", "def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))", "def measure_distance(cell1, cell2):\n\n\tx1, y1 = cell1.location\n\tx2, y2 = cell2.location\n\tx_dist = abs(x1-x2)\n\ty_dist = abs(y1-y2)\n\n\tif x_dist > 5:\n\t\tx_dist = 10-x_dist\n\tif y_dist > 5:\n\t\ty_dist = 10-y_dist\n\n\treturn (x_dist**2 + y_dist**2)**.5", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step", "def distkp(self, k1: int, k2: int) -> float:\n result = self._read_inline(f\"distkp({k1},{k2})\")\n return result", "def hamming_distance(s1, s2, hamming_distance = 3):\n\ts1 = str(s1)\n\ts2 = str(s1)\n\n\tif len(s1) != len(s2):\n\t\ts1 = replenish_int(s1, 6)\n\t\ts2 = replenish_int(s2, 6)\n\tdis = sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\n\n\tif dis <= hamming_distance:\n\t\t'表示海明距离在 3 以内'\n\t\treturn True\n\telse:\n\t\treturn False", "def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length", "def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def dist(a, b):\n return np.sum((a-b)**2.0)**.5", "def dist(x1, x2, distance):\n if distance == 'l2':\n return np.sqrt(np.sum(np.square(x1 - x2)))\n elif distance == 'squared_l2':\n return np.sum(np.square(x1 - x2))\n 
else:\n raise Exception(\"The distance '%s' is not supported.\" % distance)", "def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)", "def edit_distance(left_word: str, right_word: str) -> int:\n if len(left_word) != len(right_word):\n raise ValueError(\"Word ladder words must be same length\")\n\n distance = 0;\n for i in range(len(left_word)):\n if left_word[i] != right_word[i]:\n distance += 1\n return distance", "def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )", "def hamdist(str1, str2):\n\n diffs = 0\n for ch1, ch2 in zip(str1, str2):\n if ch1 != ch2:\n diffs += 1\n return diffs", "def query_distance(self, instance1=(), instance2=()):\n distance = sum([pow((a - b), 2) for a, b in zip(instance1, instance2)])\n return distance", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def distance_between_hex_cells(cell1, cell2):\n return even_q_distance(*(cell1 + cell2))", "def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))", "def dist_colorweight(ele1, ele2):\n \n dist_colorweight_v = ele1[2]*ele2[2]*dist_euclidean(ele1[0:2], ele2[0:2])\n return dist_colorweight_v", "def cer(self, s1, s2):\n s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')\n return Lev.distance(s1, s2)", "def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)", "def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)", "def hamming_distance(bytes_0: bytes, bytes_1: bytes) -> int:\n assert len(bytes_0) == len(bytes_1)\n return sum(sum(bits(byte_0 ^ byte_1)) for (byte_0, byte_1) in zip(bytes_0, bytes_1))", "def manhattan_distance(a: ArrayLike, b: ArrayLike) -> NDArrayFloat:\n\n return as_float(\n np.sum(np.abs(as_float_array(a) - as_float_array(b)), axis=-1)\n )" ]
[ "0.75387555", "0.69056517", "0.6859236", "0.67510206", "0.6659005", "0.66116893", "0.6603836", "0.65740633", "0.6566495", "0.65459806", "0.6529574", "0.6490348", "0.6489842", "0.64859676", "0.6471863", "0.6434619", "0.6432744", "0.6414012", "0.6411454", "0.63999486", "0.6389636", "0.6386778", "0.63855314", "0.63828", "0.6381105", "0.63664514", "0.63640213", "0.63637084", "0.6362522", "0.63544244", "0.63525206", "0.634978", "0.63490933", "0.6345371", "0.6343757", "0.6343312", "0.6342877", "0.6330021", "0.63287455", "0.63254166", "0.63246644", "0.6316623", "0.6314813", "0.6308631", "0.6300285", "0.6296165", "0.6292006", "0.6282384", "0.62811154", "0.627325", "0.627325", "0.6270528", "0.626758", "0.6261798", "0.6260301", "0.62491417", "0.6246831", "0.62453943", "0.62443763", "0.62435037", "0.62366223", "0.6232878", "0.62325186", "0.6223068", "0.6201013", "0.6198643", "0.6190767", "0.61898446", "0.61845297", "0.61811703", "0.6180959", "0.61778975", "0.6176302", "0.61753947", "0.61719364", "0.6171344", "0.61688143", "0.6167757", "0.61617565", "0.6147633", "0.6146468", "0.61431503", "0.6137463", "0.6132237", "0.6130301", "0.6129195", "0.61259645", "0.61258805", "0.6122278", "0.61162955", "0.61035055", "0.6102385", "0.60991406", "0.6099093", "0.6096274", "0.6093531", "0.60896903", "0.60883003", "0.60844827", "0.6084412" ]
0.71139956
1
Calculate matrix of number of edits to convert every subset of y to every subset of x
def distance_matrix(self, x, y, keyboard_weight=None):
    # create distance matrix
    size_x = len(x) + 1
    size_y = len(y) + 1
    dist_matrix = np.zeros((size_x, size_y))
    for i in range(size_x):
        dist_matrix[i, 0] = i
    for j in range(size_y):
        dist_matrix[0, j] = j

    ## fill distance matrix
    # no keyboard weight
    if not keyboard_weight:
        for i in range(1, size_x):
            for j in range(1, size_y):
                # if letters are same
                if x[i-1] == y[j-1]:
                    dist_matrix[i, j] = dist_matrix[i-1, j-1]
                # if letters are different
                else:
                    subs = dist_matrix[i-1, j-1] + 1
                    delete = dist_matrix[i-1, j] + 1
                    insert = dist_matrix[i, j-1] + 1
                    dist_matrix[i, j] = min(subs, delete, insert)
    # manhattan keyboard weight
    elif keyboard_weight == "manhattan":
        for i in range(1, size_x):
            for j in range(1, size_y):
                # if letters are same
                if x[i-1] == y[j-1]:
                    dist_matrix[i, j] = dist_matrix[i-1, j-1]
                # if letters are different
                else:
                    dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)
                    subs_weight = dist * self.manhattan_coef
                    subs = dist_matrix[i-1, j-1] + subs_weight
                    delete = dist_matrix[i-1, j] + 1
                    insert = dist_matrix[i, j-1] + 1
                    dist_matrix[i, j] = min(subs, delete, insert)
    # euclidean keyboard weight
    elif keyboard_weight == "euclidean":
        for i in range(1, size_x):
            for j in range(1, size_y):
                # if letters are same
                if x[i-1] == y[j-1]:
                    dist_matrix[i, j] = dist_matrix[i-1, j-1]
                # if letters are different
                else:
                    dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)
                    subs_weight = dist * self.euclidean_coef
                    subs = dist_matrix[i-1, j-1] + subs_weight
                    delete = dist_matrix[i-1, j] + 1
                    insert = dist_matrix[i, j-1] + 1
                    dist_matrix[i, j] = min(subs, delete, insert)

    return dist_matrix
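The document above is the standard Wagner–Fischer dynamic program: cell [i, j] holds the minimum number of edits turning the first i characters of x into the first j characters of y, so the "subsets" in the query are really prefixes. A minimal self-contained sketch of the unweighted branch, for reference; the name levenshtein_matrix and the test pair are illustrative, not from the source:

import numpy as np

def levenshtein_matrix(x, y):
    m = np.zeros((len(x) + 1, len(y) + 1))
    m[:, 0] = np.arange(len(x) + 1)  # cost of deleting each prefix of x
    m[0, :] = np.arange(len(y) + 1)  # cost of inserting each prefix of y
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            cost = 0 if x[i - 1] == y[j - 1] else 1
            m[i, j] = min(m[i - 1, j - 1] + cost,  # substitute (or match)
                          m[i - 1, j] + 1,         # delete x[i-1]
                          m[i, j - 1] + 1)         # insert y[j-1]
    return m

assert levenshtein_matrix("kitten", "sitting")[-1, -1] == 3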
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topsolutions(self):\n answers = []\n for y in xrange(0, self.y):\n answer = self.retrieve(y,self.y)\n i = 0\n for x in xrange(0,y):\n answer -= self.retrieve(y,x)*answers[i]\n i += 1\n answers.append(answer)\n return answers", "def build_set(x, y):\n # E_w[yy^T]\n y_y_t = la.inv(np.dot(y, y.transpose()))\n h_matrix = np.dot(np.dot(x, y), y_y_t)\n return h_matrix", "def transform(self, y):\n\n y = self.ensure_output_format(\n y, sparse_format='lil', enforce_sparse=True)\n\n self.clean()\n self.label_count = y.shape[1]\n\n last_id = 0\n train_vector = []\n for labels_applied in y.rows:\n label_string = \",\".join(map(str, labels_applied))\n\n if label_string not in self.unique_combinations:\n self.unique_combinations[label_string] = last_id\n self.reverse_combinations.append(labels_applied)\n last_id += 1\n\n train_vector.append(self.unique_combinations[label_string])\n\n return np.array(train_vector)", "def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix", "def editDist(X, Y, m, n, normalize_len=False):\n if normalize_len:\n if m < n:\n quot = n // m\n new_X = []\n for i in range(quot):\n new_X += X\n X = new_X\n # print(\"Normalized length for X: {}\".format(len(X)))\n else:\n quot = m // n\n new_Y = []\n for i in range(quot):\n new_Y += Y\n Y = new_Y\n # print(\"Normalized length for Y: {}\".format(len(Y)))\n\n\n dp = [[0 for x in range(n+1)] for x in range(m+1)]\n for i in range(m+1):\n for j in range(n+1):\n\n if i == 0:\n dp[i][j] = j\n elif j == 0:\n dp[i][j] = i\n elif X[i-1] == Y[j-1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i-1][j], dp[i-1][j-1])\n\n return dp[m][n]", "def p_y_x_knn(y, k):\n result = np.zeros((len(y), 4))\n for i in range(len(y)):\n for j in range(k):\n result[i, y[i, j]] = result[i, y[i, j]] + 1\n return 1 / k * result", "def transform( self, X, y = None ):\n matrix = np.zeros((len(X),len(self.feature_names)))\n for i,bag in enumerate(X):\n for test in bag:\n try:\n matrix[i,self.feature_names.index(test)] = 1\n except ValueError:\n pass\n return matrix", "def toCartesian(self, y):\r\n return Size - y", "def add_matrices(x, y):\n return [[x[i][j] + y[i][j] for j in range(len(x[0]))] for i in range(len(x))]", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:] * y[1:], x[:1] * y[1:] + x[1:] * y[:1]])", "def UpdateCostMatrix( self, extraXs ):\n for x in extraXs:\n newRow = [ self.EuclideanDistanceSq(x,y) for y in self.Y ]\n self.C.append(newRow)", "def copnorm_cat_1d(x, y):\n assert isinstance(x, np.ndarray) and (x.ndim == 1)\n assert isinstance(y, np.ndarray) and (x.ndim == 1)\n assert y.dtype in CONFIG['INT_DTYPE']\n x_cop = np.zeros_like(x)\n y_u = np.unique(y)\n for yi in y_u:\n _idx = y == yi\n x_cop[_idx] = copnorm_1d(x[_idx])\n return x_cop", "def cost_matrix(x, y, p=2):\n xc = tf.expand_dims(x, 1)\n yr = tf.expand_dims(y, 0)\n d = tf.math.pow(tf.abs(xc - yr), p)\n return tf.reduce_sum(d, axis=-1)", "def fillCostMatrix(xs0,ys0,xs1,ys1):\n M = int ( max(len(xs0),len(xs1)) ) #Number of centroids.\n costMatrix = np.ones((M,M))*-1\n x_rows = np.zeros(M)\n x_rows[0:len(xs0)] = xs0\n y_rows = np.zeros(M)\n y_rows[0:len(xs0)] = ys0\n \n x_cols = np.zeros(M)\n x_cols[0:len(xs1)] = xs1\n y_cols = np.zeros(M)\n y_cols[0:len(xs1)] = ys1\n\n for i in range(len(xs0)):\n for j in range(len(xs1)):\n 
costMatrix[i,j]=(y_rows[i]-y_cols[j])**2\n costMatrix[i,j] += (x_rows[i]-x_cols[j])**2\n return costMatrix", "def reconstruct_input(self, ix):", "def inverse_transform(self, y):\n n_samples = len(y)\n result = sparse.lil_matrix((n_samples, self.label_count), dtype='i8')\n for row in range(n_samples):\n assignment = y[row]\n result[row, self.reverse_combinations[assignment]] = 1\n\n return result", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def init_output_mat(self, y_list):", "def associate(x, y):\n x = torch.cat([x[1:], x])\n xx, yy = x.reshape(1,1,-1), y.flip(0).reshape(1,1,-1)\n zz = torch.nn.functional.conv1d(xx, yy)\n z = zz.reshape(-1)\n return normalize(z)", "def fillCostMatrix(xs0,ys0,xs1,ys1):\n M = int ( max(len(xs0),len(xs1)) ) #Number of centroids.\n costMatrix = np.zeros((M,M))\n x_rows = np.zeros(M)\n x_rows[0:len(xs0)] = xs0\n y_rows = np.zeros(M)\n y_rows[0:len(xs0)] = ys0\n \n x_cols = np.zeros(M)\n x_cols[0:len(xs1)] = xs1\n y_cols = np.zeros(M)\n y_cols[0:len(xs1)] = ys1\n\n for i in range(len(xs0)):\n for j in range(len(xs1)):\n costMatrix[i,j]=(y_rows[i]-y_cols[j])**2\n costMatrix[i,j] += (x_rows[i]-x_cols[j])**2\n return costMatrix", "def alloc2d(x,y,iv=0):\n return [[iv for j in range(int(x))] for i in range(int(y))]", "def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed", "def inverse_transform(self, y: Array2D) -> Array2D:", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def transform(self, x: Array2D) -> Array2D:", "def transform(self,X):\n # conver X to a list\n X = X.tolist()\n result = []\n\n # iterate over the length of X\n for b in range(len(X)):\n\n # change dataset accoring to bias\n if self.include_bias:\n X[b].insert(0, 1)\n \n # initialize an array to store dynamically all array of indices\n init_arr = []\n for j in range(len(X[b])):\n init_arr.append([j])\n\n # array of indices\n arr = [j for j in range(len(X[b]))]\n separate_arr = init_arr.copy()\n\n # iterate for the degree given\n for k in range(0,self.degree-1):\n # for len of the array containing indices\n for i in range(len(arr)):\n temp = i\n # this loop will have different length since length increases\n for j in range((k)*len(arr),len(separate_arr)):\n element = init_arr[j].copy()\n element.append(temp)\n init_arr.append(element) \n separate_arr = init_arr.copy()\n # sort the array obtained to remove repeated elements\n array = []\n for m in range(len(init_arr)):\n init_arr[m].sort()\n if(init_arr[m] not in array):\n array.append(init_arr[m])\n\n # calculate the final values by multiplying the numbers or columns at the place of indices\n final = []\n 
for i in array:\n lst = []\n # only if lenth satisfies the given degree\n if len(i)==self.degree:\n for j in i: \n lst.append(X[b][j]) \n final.append(np.product(lst))\n result.append(final)\n return result", "def solutions(self):\n answers = []\n for y in reversed(xrange(0, self.y)):\n answer = self.retrieve(y,self.y)\n i = 0\n for x in reversed(xrange(y+1, self.y)):\n answer -= self.retrieve(y,x)*answers[i]\n i += 1\n answers.append(answer)\n answers.reverse()\n return answers", "def transform(self, y):\n return self.cidx_by_size_[self.le_.transform(y)]", "def get_imin(x1, x2, y, k=1, normalize=None, norm=np.inf):\n\n if normalize:\n y = normalize(y)\n\n y_tree = cKDTree(y)\n\n n = len(y)\n i_spec = np.zeros((2, n))\n\n for jj, x in enumerate([x1, x2]):\n\n if normalize:\n x = normalize(x)\n\n # construct state array for the joint processes:\n xy = np.c_[x,y]\n\n # store data pts in kd-trees for efficient nearest neighbour computations\n # TODO: choose a better leaf size\n x_tree = cKDTree(x)\n xy_tree = cKDTree(xy)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not counting the data point itself\n # dist, idx = xy_tree.query(xy, k=k+1, p=norm)\n dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the x-subspace is strictly < epsilon\n # repeat for the y subspace\n nx = np.empty(n, dtype=np.int)\n ny = np.empty(n, dtype=np.int)\n for ii in xrange(N):\n # nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n i_spec[jj] = digamma(k) - digamma(nx+1) + digamma(ny+1) + digamma(n) # version (1)\n\n i_min = np.mean(np.min(i_spec, 0))\n\n return i_min", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def create(matrix):\n limit_y = len(matrix)\n limit_x = len(matrix[0])\n\n for y in range(1, limit_y):\n bit.create(matrix[y])\n\n for x in range(1, limit_x):\n for y in range(1, limit_y):\n k = y + (y & -y)\n if k < limit_y:\n matrix[k][x] += matrix[y][x]", "def spaceeff_dyna(x, y, c):\n n = len(y)\n m = len(x)\n for i in range(1, m + 1):\n c[0][1] = 0\n for j in range(1, n + 1):\n if x[i - 1] == y[j - 1]:\n c[j][1] = c[j - 1][0] + 1\n else:\n ctop = c[j][0]\n cleft = c[j - 1][1]\n ctopleft = c[j - 1][0]\n\n c[j][1], d = max3(ctopleft, ctop, cleft)\n for k in range(len(c)):\n c[k][0] = c[k][1]", "def matrixReduction(setHor, setVer, arrayToReduce):\r\n listTemp = []\r\n for i in range(len(setVer)):\r\n listTemp.append(arrayToReduce[setVer[i].index, :])\r\n arrayTemp = numpy.array(listTemp)\r\n listTemp = []\r\n for i in range(len(setHor)):\r\n listTemp.append(arrayTemp[:, setHor[i].index])\r\n result = numpy.transpose(numpy.array(listTemp))\r\n\r\n return result", "def ipset_num_x_y_different():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 3), x_new=np.linspace(2, 5, 4))", "def make_likelihood_table(x, y):\n\n Y = np.unique(y)\n X = np.unique(x)\n\n likelihood = [[0 for i in range(len(Y))] for j in 
range(len(X))]\n\n freq = make_frequency_table(x, y, X, Y)\n\n for j in range(len(Y)):\n Sum = (y == Y[j]).sum()\n for i in range(len(X)):\n likelihood[i][j] = freq[X[i]][j] / Sum\n\n return likelihood", "def fit_transform(self, x: Array2D) -> Array2D:", "def _permute_observations(x, y, num_perms):\r\n vals = hstack([array(x), array(y)])\r\n lenx = len(x)\r\n # sorting step is unnecessary for this code, but it ensure that test code\r\n # which relies on seeding the prng works (if we dont do this then different\r\n # observation orders in x and y for eg. the mc_t_two_sample test will fail\r\n # to produce the same results)\r\n vals.sort()\r\n inds = arange(vals.size)\r\n xs, ys = [], []\r\n for i in range(num_perms):\r\n shuffle(inds)\r\n xs.append(vals[inds[:lenx]])\r\n ys.append(vals[inds[lenx:]])\r\n return xs, ys", "def _lev(x, y):\n mat = initialize_matrix(x, y)\n\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n mat[i].append(_generate_new_node(mat, i, j, x, y))\n\n return mat", "def _lev(x, y):\n mat = initialize_matrix(x, y)\n\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n mat[i].append(_generate_new_node(mat, i, j, x, y))\n\n return mat", "def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res", "def filt1(X, yvals, xvals, ny, nx):\n \n ylen = X.shape[0]\n xlen = X.shape[1]\n\n yflen = (ylen-1)//ny\n xflen = (xlen-1)//nx\n\n Y = np.zeros((X.shape))\n\n #Y = Y[0:yflen,0:xflen,]\n\n ymax = ny*yflen+1\n xmax = nx*xflen+1\n\n Y = Y[0:ymax,0:xmax,]\n Xnew = X[0:ymax,0:xmax,]\n yvals = yvals[0:ymax,0:xmax,]\n xvals = xvals[0:ymax,0:xmax,] \n\n counter = np.zeros((Y.shape))\n \n for i in range(xflen):\n xmin = nx*i\n xmax = nx*(i+1)+1\n for j in range(yflen):\n ymin = ny*j\n ymax = ny*(j + 1)+1\n #print((xmin,xmax), (ymin,ymax))\n Y[ymin:ymax,xmin:xmax,] = Y[ymin:ymax,xmin:xmax,] + np.mean(X[ymin:ymax,xmin:xmax,], axis=(0,1))\n counter[ymin:ymax,xmin:xmax,] = counter[ymin:ymax,xmin:xmax,] + 1\n\n Y = Y/counter #We take the average of the points that appear more than once\n\n return Xnew, Y, yvals, xvals", "def fit(self, x, y):\n tempdf = pd.DataFrame({'x':x, 'y':y})\n self.d = tempdf.groupby('x').apply(lambda g: g.y.sum()/len(g)).to_dict()", "def cemap_cal(y_pred,y_true):\r\n nTest = y_true.shape[0]\r\n nLabel = y_true.shape[1]\r\n ap = np.zeros(nTest)\r\n for i in range(0,nTest):\r\n for j in range(0,nLabel):\r\n R = np.sum(y_true[i,:])\r\n if y_true[i,j]==1:\r\n r = np.sum(y_pred[i,:]>=y_pred[i,j])\r\n rb = np.sum(y_pred[i,np.nonzero(y_true[i,:])] >= y_pred[i,j])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n imap = np.nanmean(ap)\r\n\r\n ap = np.zeros(nLabel)\r\n for i in range(0,nLabel):\r\n for j in range(0,nTest):\r\n R = np.sum(y_true[:,i])\r\n if y_true[j,i]==1:\r\n r = np.sum(y_pred[:,i] >= y_pred[j,i])\r\n rb = np.sum(y_pred[np.nonzero(y_true[:,i]),i] >= y_pred[j,i])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n lmap = np.nanmean(ap)\r\n\r\n return lmap,imap", "def fit(self, X, y):\n # TODO: Implement\n self.cols = X.columns\n self.nCols = len(self.cols)\n X = np.array(X)\n y = np.array(y)\n \n for i in range(X.shape[1]): \n uniq = np.unique(X[:, i])\n self.possible.append(list(uniq)) # add possible values\n self.valN.append(len(uniq)) # and how many\n index = np.argmax(self.valN)\n print(index)\n self.tree = self.buildTree(X, y)", "def ipset_y_2d():\n return IPSet(x=np.linspace(0, 10, 11), 
y=np.random.randn(11, 4), x_new=np.linspace(1, 4, 3))", "def sum_outers(x, y):\n # In PyTorch 4.0, `einsum` modifies variables inplace. This will not work\n # unless you have PyTorch 4.1:\n #\n # https://github.com/pytorch/pytorch/issues/7763\n #\n return torch.einsum('ab,cb->ac', [x, y])", "def UpdateS(s, Difference, WorkingSet):\n for i in range(len(x_train)):\n Sum = 0.0\n for j in range(q):\n Sum = Sum + (Difference[j])*y_train[int(WorkingSet[j,0])]*Kernel(i, int(WorkingSet[j,0]))\n s[i] = s[i] + Sum\n return s", "def _diffmat_objective(a,X):\n \n (n,p) = X.shape\n return(X - np.tile(a,(n,1)))", "def multiple(x, y):\n curRow = [1] * x\n for _ in range(1,y):\n for N in range(1,x):\n curRow[N] = curRow[N-1] + curRow[N]\n return curRow[x-1]", "def interpolV(y, x, newX):\r\n \r\n num = len(x)\r\n #if (num != len(y)):\r\n #//System.out.println(\"Toolbox.interpolV(): Old x and y must be same length\"); \r\n \r\n newNum = len(newX)\r\n #//System.out.println(\"interpolV: newNum \" + newNum + \" num \" + num); \r\n #newY = [0.0 for i in range(newNum)]\r\n\r\n#//Renormalize ordinates:\r\n \r\n iMinAndMax = minMax(y)\r\n norm = y[iMinAndMax[1]]\r\n #//System.out.println(\"norm \" + norm);\r\n #yNorm = [0.0 for i in range(num)]\r\n newYNorm = [0.0 for i in range(newNum)] \r\n #for i in range(num):\r\n # yNorm[i] = y[i] / norm \r\n yNorm = [ x / norm for x in y ]\r\n\r\n#// Set any newX elements that are *less than* the first x element to th first \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n start = 0\r\n for i in range(newNum):\r\n if (newX[i] <= x[1]):\r\n newYNorm[i] = yNorm[0]\r\n start += 1\r\n \r\n if (newX[i] > x[1]):\r\n break\r\n \r\n \r\n#//System.out.println(\"start \" + start);\r\n#//System.out.println(\"x[0] \" + x[0] + \" x[1] \" + x[1] + \" newX[start] \" + newX[start]);\r\n#double jWght, jm1Wght, denom;\r\n\r\n\r\n if (start < newNum-1):\r\n\r\n j = 1 #//initialize old abscissae index\r\n #//outer loop over new abscissae\r\n for i in range(start, newNum):\r\n\r\n #//System.out.println(\"i \" + i + \" j \" + j);\r\n\r\n#// break out if current element newX is *greater* that last x element\r\n if ( (newX[i] > x[num-1]) or (j > (num-1)) ):\r\n break \r\n \r\n\r\n while (x[j] < newX[i]): \r\n j += 1\r\n \r\n #//System.out.println(\"i \" + i + \" newX[i] \" + newX[i] + \" j \" + j + \" x[j-1] \" + x[j-1] + \" x[j] \" + x[j]);\r\n #//1st order Lagrange method:\r\n jWght = newX[i] * (1.0 - (x[j-1]/newX[i])) #//(newX[i]-x[j-1])\r\n jm1Wght = x[j] * (1.0 - (newX[i]/x[j])) #//(x[j]-newX[i])\r\n denom = x[j] * (1.0 - (x[j-1]/x[j])) #//(x[j]-x[j-1])\r\n jWght = jWght / denom\r\n jm1Wght = jm1Wght / denom\r\n #//newYNorm[i] = (yNorm[j]*(newX[i]-x[j-1])) + (yNorm[j-1]*(x[j]-newX[i]));\r\n newYNorm[i] = (yNorm[j]*jWght) + (yNorm[j-1]*jm1Wght)\r\n #//System.out.println(\"i \" + i + \" newYNorm[i] \" + newYNorm[i] + \" j \" + j + \" yNorm[j-1] \" + yNorm[j-1] + \" yNorm[j] \" + yNorm[j]);\r\n \r\n\r\n#// Set any newX elements that are *greater than* the first x element to the last \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n for i in range(newNum):\r\n if (newX[i] >= x[num-1]):\r\n newYNorm[i] = yNorm[num-1]\r\n \r\n \r\n\r\n #//Restore orinate scale\r\n #for i in range(newNum):\r\n # newY[i] = newYNorm[i] * norm \r\n newY = [ x * norm for x in newYNorm ]\r\n\r\n\r\n return newY", "def back_spaceeff_dyna(x, y, c):\n n = len(y)\n m = len(x)\n for i in range(m - 1, -1, -1):\n c[n][1] = 0\n for j in range(n - 1, -1, -1):\n if x[i] == y[j]:\n c[j][1] = c[j + 1][0] + 1\n 
else:\n cbottom = c[j][0]\n cright = c[j + 1][1]\n cbottomright = c[j + 1][0]\n\n c[j][1], d = max3(cbottomright, cbottom, cright)\n for k in range(len(c)):\n c[k][0] = c[k][1]", "def expand_features_and_labels(x_feat, y_labels):\n x_expanded = []\n y_expanded = []\n for x, y in zip(x_feat, y_labels):\n for segment in x:\n x_expanded.append(segment)\n y_expanded.append(y)\n return x_expanded, y_expanded", "def transform(self, X, y=None):\n rows = []\n cols = []\n data = []\n #Loop through each reviews\n for row, word_count in enumerate(X):\n #Analyze each review with the total vocab of this dataset\n for word, count in word_count.items():\n rows.append(row)\n cols.append(self.vocabulary_.get(word, 0)) #If non, replace it with 0\n data.append(count)\n \n return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))", "def update_grid(self, x):\r\n\r\n # Append boundary rows and columns to matrix\r\n x = self.append_boundary(x) # the boundary is recomputed at each step\r\n y = np.copy(x)\r\n\r\n # For each cell within boundary, compute state according to rules.\r\n chg_0_1 = 0 # the number of cells that changed from state 0 to state 1\r\n chg_1_0 = 0 # the number of cells that changes from state 1 to state 0\r\n chg_none = 0 # the number of cells that did not change\r\n index = np.arange(1, x.shape[0] - 1)\r\n for i in index:\r\n for j in index:\r\n neighborhood = x[i - 1:i + 2:1, j - 1:j + 2:1] # 3x3 sub matrix centered at i, j\r\n y[i, j] = self.update_cell(neighborhood)\r\n change = int(y[i, j] - x[i, j])\r\n if change == -1:\r\n chg_1_0 += 1\r\n if change == 0:\r\n chg_none += 1\r\n if change == 1:\r\n chg_0_1 += 1\r\n\r\n # Compute statistics excluding boundary\r\n total = np.power(x[1:-1:1, 1:-1:1].shape[0] - 1, 2)\r\n start_1 = np.sum(x[1:-1:1, 1:-1:1])\r\n end_1 = np.sum(y[1:-1:1, 1:-1:1])\r\n stats = [total, start_1, end_1, chg_1_0, chg_none, chg_0_1]\r\n\r\n return y[1:-1:1, 1:-1:1], stats # remove the boundary\r", "def reconstruct(self, X, y):\n return self.sess.run(self.x_reconstr_mean,\n feed_dict={self.x: X, self.y: y.reshape([-1, 1])})", "def transform(i, j, k):\n return i * N * N + j * N + k + 1", "def poly_matrix(x, y, order=2):\r\n ncols = (order + 1)**2\r\n G = np.zeros((x.size, ncols))\r\n ij = itertools.product(range(order+1), range(order+1))\r\n for k, (i, j) in enumerate(ij):\r\n G[:, k] = x**i * y**j\r\n return G", "def y_to_z_mapping(self, Y):\n if len(Y[0])!=self.label_width**2:\n print('input labels have different dimension')\n Z = []\n for label in Y:\n z_label = np.array(label)\n for i in range(self.label_width**2):\n z_label = np.concatenate((z_label, (label[i+1:]==label[i]).astype(int)))\n Z.append(z_label)\n return Z", "def count_subs(x,y):\n\t# Encases diagonals in square grid of size 'square'\n\tsquare = x + y - 2\n\tsubs = 0\n\t# For every point counts the number of rectagles with (a,b) as upper left corner\n\tfor a in range(square):\n\t\tfor b in range(square):\n\t\t\tif valid(a,b,x,y):\n\t\t\t\tthis_subs = subs_at_point(a,b,x,y)\n\t\t\t\tprint \"%3d \" %(this_subs),\n\t\t\tprint \"\"\n\treturn subs", "def expand_ism(ism, Y1_labels):\n \n import random\n import pandas as pd\n import numpy as np\n import time\n print('debug expand ism1') \n voxel_num=len(Y1_labels)\n voxel_ism = np.zeros((voxel_num,voxel_num))\n transform_mat=np.zeros((len(ism),voxel_num))\n \n matrixtime = time.time()\n print('debug expand ism2') \n #import pdb; pdb.set_trace()\n\n for i in range(0,voxel_num):\n transform_mat[Y1_labels[i],i]=1\n\n print('debug expand 
ism3') \n\n temp=np.dot(ism,transform_mat)\n print('debug expand ism4') \n\n target_mat=np.dot(temp.T,transform_mat)\n \n \n XM_time= time.time() - matrixtime\n #print('Matrix expansion took', (time.time() - matrixtime), ' seconds')\n voxel_ism=target_mat\n \n return voxel_ism", "def get_pool_data(x, y):\n\n y_classes = np.max(y)+1\n\n xs = [] # xs to put in the training set\n ys = [] # ys to put in the training set\n idxs = [] # indexes of data put in the training set\n for y_class in range(y_classes):\n idx = np.array( np.where(y == y_class) ).T\n idx = idx[0:2, 0]\n xs.append(x[idx])\n ys.append(y[idx])\n idxs.extend(idx)\n\n x_train = np.concatenate(xs, axis=0)\n y_train = np.concatenate(ys, axis=0)\n\n x_pool = np.delete(x, idxs, axis=0)\n y_pool = np.delete(y, idxs, axis=0)\n \n return (x_train, y_train), (x_pool, y_pool)", "def pull(self,x,y):\n\t\tself.x_sum -= np.sum(x,axis=0)[:,np.newaxis]\n\t\tself.y_sum -= np.sum(y,axis=0)[:,np.newaxis]\n\t\tself.xy_sum -= np.matmul(np.transpose(x),y)\n\t\tself.xx_sum -= np.matmul(np.transpose(x),x)\n\t\tself.yy_sum -= np.matmul(np.transpose(y),y)\n\t\tself.n -= np.shape(x)[0]", "def get_X_Y_vectorized_int(dataset: dict):\n X = []\n Y = []\n\n d_list = list(dataset)\n\n for k in dataset:\n X += dataset[k]\n\n temp = [0] * len(d_list)\n\n index_in_d_list = d_list.index(k)\n\n temp[index_in_d_list] = 1\n\n for i in range(len(dataset[k])):\n Y += [temp]\n\n assert len(X) == len(Y)\n return X, Y", "def make_frequency_table(x, y, X, Y):\n freq = dict()\n\n for i in range(len(X)):\n freq[X[i]] = [0, 0]\n\n # merging the two to get a matrix\n\n M = np.array([[x[i], y[i]] for i in range(len(x))])\n\n for i in range(len(M)):\n if M[i][1] == Y[0]:\n freq[M[i][0]][0] += 1\n else:\n freq[M[i][0]][1] += 1\n\n return freq", "def _get_matrix(self, source_points, destination_points):\n return [\n [self.measure_between_two_points(point_a, point_b) for point_b in destination_points]\n for point_a in source_points\n ]", "def zenith_nadir(x, y):\n if y == 'm':\n bb = []\n cc = []\n for i in range(x.shape[1]):\n bb.append(amax(x[:, i:i + 1]))\n b = array(bb)\n cc.append(amin(x[:, i:i + 1]))\n c = array(cc)\n return (b, c)\n else:\n b = ones(x.shape[1])\n c = zeros(x.shape[1])\n return (b, c)", "def fit(self, X, y):\r\n from collections import defaultdict\r\n y_set = set(y)\r\n ver_cls = dict()\r\n for i in y_set:\r\n ver_cls[i] = y.count(i) / len(y)\r\n add_data_units_cls = defaultdict(dict)\r\n add_data_all_cls = defaultdict(int)\r\n ver_words = defaultdict(dict)\r\n for sentence in range(len(X)):\r\n for word in X[sentence].split():\r\n add_data_all_cls[word] += 1\r\n if y[sentence] in add_data_units_cls[word].keys():\r\n add_data_units_cls[word][y[sentence]] += 1\r\n else:\r\n add_data_units_cls[word][y[sentence]] = 1\r\n znam = defaultdict(int)\r\n for key in add_data_units_cls.keys():\r\n for i in y_set:\r\n if i in add_data_units_cls[key].keys():\r\n znam[i] += add_data_units_cls[key][i]\r\n print(znam)\r\n for key in add_data_all_cls.keys():\r\n for i in y_set:\r\n if i not in add_data_units_cls[key].keys():\r\n add_data_units_cls[key][i] = 0\r\n ver_words[key][i] = (add_data_units_cls[key][i] + self.alpha) / (\r\n znam[i] + self.alpha * len(add_data_all_cls.keys()))\r\n self.ver_dict = ver_words\r\n self.ver_cls = ver_cls\r\n pass", "def gen_ys(self, y_func):\n y_matrix = np.zeros((self.sample_size, self.n_act, self.ydim))\n for trt in range(1, self.n_act + 1):\n y_matrix[:, trt - 1] = y_func(self.x,\n np.ones(self.sample_size).reshape(-1, 1) * trt,\n 
self.ydim, self.generator)\n self.ys = y_matrix", "def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts", "def conv_1x1(x, y):\n (S, B, C, H, W) = x.shape\n assert x.shape[-1] == y.shape[-1]\n assert x.shape[-2] == y.shape[-2]\n assert x.shape[-4] == y.shape[-4]\n\n #[..., B, C, H, W] -> [..., B, C, HW]\n x = x.view(*x.shape[:-2], H*W)\n y = y.view(*y.shape[:-2], H*W)\n\n #[..., B, C, C']\n XTX = (x @ x.transpose(-1, -2)).sum(-3)\n XTY = (x @ y.transpose(-1, -2)).sum(-3)\n\n return XTX, XTY", "def __init__(self, xint, yint):\n self.xint = xint\n self.yint = yint\n self.n = len(xint)\n w = np.ones(self.n)\n self.C = (np.max(xint) - np.min(xint)) / 4\n shuffle = np.random.permutation(self.n - 1)\n for j in range(self.n):\n temp = (xint[j] - np.delete(xint,j)) / self.C\n temp = temp[shuffle]\n w[j] /= np.product(temp)\n self.weights = w", "def transform(self, Xs, y=None):\n pass # pragma: no cover", "def resizeXY(X, Y, occurrency, dx, dz):\n\tsumY = sum(Y) \n\tsumX = sum(X)\n\tvisitedY = [False]*len(Y)\n\tfor y_index in range(len(Y)):\n\t\tupdate = True\n\t\tfor x_index in range(len(X)):\n\t\t\tif(occurrency[x_index][y_index] == False):\n\t\t\t\tupdate = False \n\t\tif(update):\n\t\t\tsumY = sumY - Y[y_index]\n\t\t\tsumX = sumX - X[y_index]\n\t\t\tdx = dx - X[y_index]\n\t\t\tdz = dz - Y[y_index]\n\n\tfor x_index in range(len(X)):\n\t\tmodifyX = False\n\t\tfor y_index in range(len(Y)):\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == False):\n\t\t\t\tY[y_index] = (dz * Y[y_index])/sumY\n\t\t\t\tvisitedY[y_index] = True\n\t\t\t\tmodifyX = True\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):\n\t\t\t\tmodifyX = True\n\t\tif(modifyX):\n\t\t\tX[x_index] = (dx * X[x_index])/sumX", "def p_y_x_knn(y, k):\n number_of_classes = 4\n resized = np.delete(y, range(k, y.shape[1]), axis=1)\n summed_with_zero = np.vstack(np.apply_along_axis(np.bincount, axis=1, arr=resized, minlength=number_of_classes + 1))\n summed = np.delete(summed_with_zero, 0, axis=1)\n return summed / k", "def create_subsets(x, y):\n # initiate empty list for return variables.\n sets_x = []\n sets_y = []\n indices = []\n\n # iterate through value of PRI_JET_NUM (ranged inclusively from 0 until 3)\n for pri_jet_num_val in np.unique(x[:,22]):\n \n # Find subset which DER_MASS_MMC is not equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] != -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask)\n\n # Find subset which DER_MASS_MMC is equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] == -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask) \n \n # return subsets of x, y, and corresponding indices\n return sets_x, sets_y, indices", "def confusion_matrix(y_true, y_pred, labels):\r\n matrix = []\r\n\r\n for i, yt in enumerate(labels):\r\n matrix.append([])\r\n for _, yp in enumerate(labels):\r\n 
matrix[i].append(0)\r\n\r\n for t, p in zip(y_true, y_pred):\r\n t_num = labels.index(t)\r\n p_num = labels.index(p)\r\n matrix[t_num][p_num] += 1\r\n\r\n return matrix", "def get2DBins(x, y, binSizeX, binSizeY):\n\n result = []\n xlength = len(x)\n ylength = len(y)\n\n i = 0\n xcount = 0\n for i1 in range(0, xlength, binSizeX):\n i2 = i1 + binSizeX\n if i2 >= xlength:\n i2 = xlength - 1\n xcount += 1\n ycount = 0\n for j1 in range(0, ylength, binSizeY):\n j2 = j1 + binSizeY\n if j2 >= ylength:\n j2 = ylength - 1\n result.append((i1, i2, j1, j2))\n ycount += 1\n return result, xcount, ycount", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def reformat(x, y):\r\n # img_size, num_ch, num_class = int(np.sqrt(x.shape[1])), 1, len(np.unique(np.argmax(y, 1)))\r\n img_size, num_ch, num_class = 14, 1, 16\r\n dataset = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)\r\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32) # =[1 2 3 ... 10]??\r\n return dataset, labels", "def reduce_y(y, mask):\n return y", "def possible_splits(self,feature,y):\n\n yi = y[:-1]\n yi1= y[1:]\n idx=np.argwhere((yi1-yi)!=0)\n return idx.flatten()", "def matrix_add(x,y) -> [[]]:\n X = x\n\n Y = y\n\n if len(X) == len(Y) and len(X[0]) == len(Y[0]):\n return [[X[a][b] + Y[a][b] for b in range(len(X[0]))]\n for a in range(len(X))]", "def __get_masks(x_shape, y, n_train=None):\n # type: (Tuple[int], np.ndarray, int) -> (np.ndarray, np.ndarray)\n n_train = n_train if n_train is not None else const.n_train\n\n if n_train <= 0 or n_train > x_shape[0]:\n return np.full(shape=x_shape, fill_value=True, dtype=bool), np.full(shape=y.shape, fill_value=True, dtype=bool)\n\n all_indexes = defaultdict(list) # type: Dict[int, List[int]]\n for i in range(len(y)):\n curr = int(y[i])\n all_indexes[curr].append(i)\n\n ratios = defaultdict() # type: Dict[int, float]\n\n for i, j in all_indexes.items():\n ratios[i] = (len(j) * 1. 
/ len(all_indexes[0]))\n\n # Ratios split the whole dataset to ratios given class and first class.\n # Part scales these ratios up, so that, 'part' corresponds to size of first class.\n part = n_train * 1. / sum(ratios.values())\n if part == 0: # n_train is 0.\n part = len(y) * 1. / sum(ratios.values())\n\n # Masks of what to keep.\n indexes_x = np.full(shape=x_shape, fill_value=False, dtype=bool)\n indexes_y = np.full(shape=y.shape, fill_value=False, dtype=bool)\n\n for i in all_indexes.keys():\n chosen_idxs = random.sample(all_indexes[i], int(part * ratios[i]))\n indexes_y[chosen_idxs] = True\n indexes_x[chosen_idxs, ...] = True\n\n return indexes_x, indexes_y", "def effmat(tp: np.ndarray, t: np.ndarray) -> np.ndarray:\n n = len(t) # batch size\n max_true_n_sources = max(t)\n max_pred_n_sources = max(tp)\n eff_mat = np.zeros((max_pred_n_sources + 1, max_true_n_sources + 1))\n for ii in range(n):\n eff_mat[tp[ii], t[ii]] += 1\n return eff_mat", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def make_oneq_cliffords():\n ixyz_list = [g().to_matrix() for g in (IGate, XGate, YGate, ZGate)]\n ih_list = [g().to_matrix() for g in (IGate, HGate)]\n irs_list = [\n IGate().to_matrix(),\n SdgGate().to_matrix() @ HGate().to_matrix(),\n HGate().to_matrix() @ SGate().to_matrix(),\n ]\n oneq_cliffords = [\n Operator(ixyz @ ih @ irs) for ixyz in ixyz_list for ih in ih_list for irs in irs_list\n ]\n return oneq_cliffords", "def get_score_matrix(self) -> int:", "def fit(self, X, y):\n\t\tself._initialize_weights(X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tif self.shuffle:\n\t\t\t\tX, y = self._shuffle(X,y)\n\t\t\tcost = []\n\t\t\t#calculate for each sample\n\t\t\tfor xi, target in zip(X, y):\n\t\t\t\tcost.append(self._update_weights(xi, target))\n\t\t\tave_cost = sum(cost)/len(y)\n\t\t\tself.cost_.append(ave_cost)\n\t\treturn self", "def monomio(x,datos_x,datos_y):\n matriz=np.zeros([datos_x.shape[0],datos_x.shape[0]])\n for j in range(datos_x.shape[0]): #Se contruye la matriz de vandermonde\n matriz[:,j]= datos_x**(j)\n matriz,datos_y=pivoteo_parcial(matriz,datos_y)\n x1= descompo_LU(matriz,datos_y)# se resulve el sistema de ecuaciones por metodo directo\n\n puntos=[] #se almacenan los valores de y para cada punto de x que se quiera calcular \n\n for p in x: #va a ir tomando los valores de x uno por uno \n prod=np.zeros(x1.shape[0])\n for i in range(x1.shape[0]):\n if i==0:\n prod[i]=1\n else:\n prod[i]=prod[i-1]*p #Se hace el calculo de los polimonios con todos los valores de x \n solucion=x1@prod\n puntos.append(solucion) # se agregan los valores de y a la lista final \n puntos=np.array(puntos)# se convierte la lista en array para mejor manejo\n\n return puntos", "def calcAllIntensities(self, xc, yc):\n\n tp = 0.0\n ix = 0\n iy = 0\n h = 0\n ints = np.zeros([5, 5])\n ints_inner = np.zeros([5, 5])\n # ints = [[0.0] * 5] * 5\n # ints_inner = [[0.0] * 5] * 5\n x = 0.0\n y = 0.0\n xc1 = 0.0\n yc1 = 0.0\n xc1 = xc\n yc1 = yc\n \n for h in np.arange(1,5,1):\n for k in np.arange(1,5,1):\n ints[h][k] = 0.0\n ints_inner[h][k] = 0.0\n\n for k in np.arange(0, 2, 1):\n for h in np.arange(0, 2, 1):\n for ix in np.arange(0, self.stepp + 1, 1):\n for iy in np.arange(0, self.stepp + 1, 1):\n #print(k, h, ix, iy)\n if self.qc_format == 0 :\n x = -(1 + self.G) + h * (1 + 2 * self.G) + (ix * (1.0 / self.stepp))\n y = -(1 + self.G) + k * (1 + 2 
* self.G) + (iy * (1.0 / self.stepp))\n if self.spot_radius == 0 or math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))) / ((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))\n tp = math.pow(tp,2)\n #print(tp)\n elif self.qc_format == 1 :\n x = -1 + h + (ix * (1 / self.stepp))\n y = -1 + k + (iy * (1 / self.stepp))\n ints[h + 1][k + 1] += math.pow(math.exp((math.pow((x - xc1),2) + math.pow((y - yc1),2) ) / math.pow(self.spot_radius,2)), -1)\n if (self.spot_radius * self.spot_radius) == 0 or ((x - xc1) * (y - yc1) * np.pi * np.pi) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((x - xc1) * np.pi / self.spot_radius) * math.sin((y - yc1) * np.pi / self.spot_radius)) / (((x - xc1) * (y - yc1) * np.pi * np.pi) / (self.spot_radius * self.spot_radius))\n\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.radius_inner,2):\n ints_inner[h + 1][k + 1] += tp\n else :\n if self.qc_format == 1 :\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.cell_qc, 2):\n ints[h + 1][k + 1] += tp\n if (math.pow(x,2) + math.pow(y,2)) <= 1 :\n #print(math.pow(x,2) + math.pow(y,2))\n ints[h + 1][k + 1] += tp\n # print(ints[h + 1][k + 1])\t\t\t\t\t\t\n tp = 0.0\n\n # print(ints)\n\n Aq = 0.0\n Bq = 0.0\n Cq = 0.0\n Dq = 0.0\n Ac_inner = 0.0\n Bc_inner = 0.0\n Cc_inner = 0.0\n Dc_inner = 0.0\n Ac = 0.0\n Bc = 0.0\n Cc = 0.0\n Dc = 0.0\n Ac = ints[1][2]\n Bc = ints[2][2]\n Cc = ints[2][1]\n Dc = ints[1][1]\n\n Ac_inner = ints_inner[1][2]\n Bc_inner = ints_inner[2][2]\n Cc_inner = ints_inner[2][1]\n Dc_inner = ints_inner[1][1]\n Ac *= self.QE\n Bc *= self.QE\n Cc *= self.QE\n Dc *= self.QE\n\n Ac_inner *= self.QE_inner\n Bc_inner *= self.QE_inner\n Cc_inner *= self.QE_inner\n Dc_inner *= self.QE_inner\n Ac += Ac_inner\n Bc += Bc_inner\n Cc += Cc_inner\n Dc += Dc_inner\n\n Aq = Ac\n Bq = Bc\n Cq = Cc\n Dq = Dc\n\n #tp/TP = cotribution percentage of the spot with respect to max (spot center)\n if self.smooth == 0 :\n if (Config.hplk_c0_e * self.TP) == 0 :\n cnst = 0\n else :\n cnst = ((Parameters.TPS / (self.n_ml * self.n_ml)) * self.lamb) / (Config.hplk_c0_e * self.TP) #Número de fótons efeticos\n if Config.flag_spice == 1 :\n Ac *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP) #W\n Bc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Cc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Dc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Ac *= 1 / (math.pow(self.cell_qc * 1e-6,2)) #W/(m^2)\n Bc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Cc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Dc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n #Ac *= 1 / (self.lamb * 1e6); #Adequação da irradiância para a unidade W/m2micm conforme necessário no SPICE\n #Bc *= 1 / (self.lamb * 1e6);\n #Cc *= 1 / (self.lamb * 1e6);\n #Dc *= 1 / (self.lamb * 1e6);\n \n ############################## DOUBLE CHECK ##############################\n # self.grava_arquivos = 1\n # self.flag_V_QC = 0\n # grava_le_arquivos(0) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n # self.flag_V_QC = 1\n # self.grava_arquivos = 0\n ############################## DOUBLE CHECK ##############################\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n else :\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n\n # 'returns' all the intensities\n self.A_intensity = Aq\n self.B_intensity = Bq\n self.C_intensity = Cq\n self.D_intensity = Dq", "def 
correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def clifford_set(u):\n i, x, y, z = u.v\n result = []\n result.append(u.clone()) # I\n result.append(Uop(-x, i, -z, y, u.hierarchy, u.construction + [\"X\"], gateset=u.gateset)) # iX, but treat it as X due to only phase difference\n result.append(Uop((i-x)/SQRT2, (x+i)/SQRT2, (y-z)/SQRT2, (z+y)/SQRT2, u.hierarchy, u.construction + [\"(I+iX)\"], gateset=u.gateset))\n result.append(Uop((i+x)/SQRT2, (x-i)/SQRT2, (y+z)/SQRT2, (z-y)/SQRT2, u.hierarchy, u.construction + [\"(I-iX)\"], gateset=u.gateset))\n result.append(Uop((i-y)/SQRT2, (x+z)/SQRT2, (y+i)/SQRT2, (z-x)/SQRT2, u.hierarchy, u.construction + [\"(I+iY)\"], gateset=u.gateset))\n result.append(Uop((i+y)/SQRT2, (x-z)/SQRT2, (y-i)/SQRT2, (z+x)/SQRT2, u.hierarchy, u.construction + [\"(I-iY)\"], gateset=u.gateset))\n for idx in range(6):\n i, x, y, z = result[idx].v\n c = result[idx].construction[-1:] if idx != 0 else []\n result.append(Uop(-z, -y, x, i, u.hierarchy, u.construction + c + [\"Z\"], gateset=u.gateset)) # iZ\n result.append(Uop((i-z)/SQRT2, (x-y)/SQRT2, (y+x)/SQRT2, (z+i)/SQRT2, u.hierarchy, u.construction + c + [\"(I+iZ)\"], gateset=u.gateset))\n result.append(Uop((i+z)/SQRT2, (x+y)/SQRT2, (y-x)/SQRT2, (z-i)/SQRT2, u.hierarchy, u.construction + c + [\"(I-iZ)\"], gateset=u.gateset))\n\n return result", "def _split_indices(X, y, n_folds=5):\n # TODO: check if indices are permuted\n n = X.shape[0]\n print('n:', n)\n #original_indices = np.arange(n)\n shuffle = np.random.permutation(n)\n subset_proportion = 1./float(n_folds)\n fold_size = int(subset_proportion*n)\n folds = [shuffle[i*fold_size:(i+1)*fold_size] for i in range(n_folds)]\n return folds", "def mi_bin_conn_time(x, y, bins_x, bins_y):\n n_times, n_trials = x.shape\n mi = np.zeros((n_times), dtype=np.float32)\n for t in range(n_times):\n mi[t] = mi_bin(x[t, :], y[t, :], bins_x, bins_y)\n return mi", "def cumulative_capacity_rule(_m, g, y):\r\n\r\n return sum(m.x_c[g, j] for j in m.Y if j <= y)", "def reconstructXY(self, inputs):\n return (self.reconstructX(inputs),\n self.reconstructY(inputs))", "def update_output(self, latent_mat, weight_mat, y_list):", "def _recon_lcs(x, y):\n i, j = len(x), len(y)\n table = _lcs(x, y)\n\n def _recon(i, j):\n \"\"\"private recon calculation\"\"\"\n if i == 0 or j == 0:\n return []\n elif x[i - 1] == y[j - 1]:\n return _recon(i - 1, j - 1) + [(x[i - 1], i)]\n elif table[i - 1, j] > table[i, j - 1]:\n return _recon(i - 1, j)\n else:\n return _recon(i, j - 1)\n\n recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))\n return recon_tuple", "def add_dims_to_match(x, y):\n x_shape = tf.shape(x)\n new_dims = tf.rank(y)-tf.rank(x)\n if new_dims > 0:\n new_shape = tf.concat((x_shape, tf.ones((new_dims,), dtype=tf.int32)), axis=0)\n return tf.reshape(x, new_shape)\n else:\n return x", "def inplace(block_size=20000):\n y = np.empty(len(x))\n for k in range(len(x) // block_size + 1):\n b, e = k * block_size, (k+1) * block_size\n y[b:e] = x[b:e]\n y[b:e] *= .25\n y[b:e] += .75\n y[b:e] *= x[b:e]\n y[b:e] -= 1.5\n y[b:e] *= x[b:e]\n y[b:e] -= 2\n\n return y" ]
[ "0.5772924", "0.573736", "0.57085884", "0.5653021", "0.56054556", "0.54707366", "0.5380854", "0.53584445", "0.5358038", "0.53385925", "0.5314144", "0.5285138", "0.52835494", "0.52652776", "0.52577674", "0.5253505", "0.5253083", "0.524562", "0.5237841", "0.52344394", "0.5211222", "0.51828897", "0.51802444", "0.5178981", "0.5178981", "0.51725096", "0.51699585", "0.5166547", "0.5157785", "0.515223", "0.51469713", "0.51321095", "0.5115334", "0.5087791", "0.50812095", "0.5076138", "0.5074466", "0.5057698", "0.50500864", "0.50500864", "0.5035716", "0.50207555", "0.49975905", "0.4982586", "0.49759686", "0.49691507", "0.49595478", "0.4948456", "0.49445125", "0.49383506", "0.49262735", "0.4912366", "0.4910826", "0.4906799", "0.49047226", "0.48978412", "0.4896438", "0.48920682", "0.4891731", "0.48900634", "0.48893338", "0.48886907", "0.4885065", "0.48842517", "0.4881399", "0.4880126", "0.48797235", "0.48771152", "0.4866483", "0.48636025", "0.48633644", "0.485049", "0.48362094", "0.4832051", "0.48197523", "0.48140198", "0.48039287", "0.4798375", "0.47875795", "0.47807667", "0.477661", "0.47749743", "0.47744423", "0.47732797", "0.47727358", "0.4771551", "0.47644138", "0.4764349", "0.47624502", "0.47509244", "0.47450113", "0.4740621", "0.47403514", "0.47384232", "0.47349578", "0.47333637", "0.47283027", "0.47274932", "0.47198072", "0.47189203", "0.4718727" ]
0.0
-1
Calculate number of edits to convert y to x
def distance(self, x, y, keyboard_weight=None):
    dist_matrix = self.distance_matrix(x, y, keyboard_weight)
    return dist_matrix[-1, -1]
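Since distance only reads the bottom-right cell of the matrix, the full table is not strictly required; keeping one rolling row gives the same answer in O(len(y)) memory. A hedged standalone sketch of that variant; edit_distance and the test pair are illustrative, not from the source:

def edit_distance(x, y):
    prev = list(range(len(y) + 1))  # DP row for the empty prefix of x
    for i, cx in enumerate(x, 1):
        cur = [i]                   # cost of deleting the first i chars of x
        for j, cy in enumerate(y, 1):
            cur.append(min(prev[j - 1] + (cx != cy),  # substitute / match
                           prev[j] + 1,               # delete
                           cur[j - 1] + 1))           # insert
        prev = cur
    return prev[-1]

assert edit_distance("flaw", "lawn") == 2  # delete 'f', append 'n'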
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_applies(self):\n ops = 0\n for _, remainder, _ in self:\n ops += len(remainder)\n return ops", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def n_actions(self) -> int:\n return np.unique(self.y).shape[0]", "def y(self) -> int:", "def n_diff(self):\n return 1 + int(self.differential)", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def count_difference(patch1, patch2):\n\n\treturn np.sum(np.square(patch1 - patch2))", "def edit_distance(x, y):\n\n global recursion_depth\n global num_function_calls\n recursion_depth += 1\n num_function_calls += 1\n indent = \" \" * recursion_depth\n print(\"%sBEGIN edit_distance(\\\"%s\\\", \\\"%s\\\")\" % (indent, x, y))\n n = len(x)\n m = len(y)\n if n == 0:\n ed = m\n elif m == 0:\n ed = n\n else:\n ed1 = edit_distance(x, y[0:m-1]) + 1\n ed2 = edit_distance(x[0:n-1], y) + 1\n ed3 = edit_distance(x[0:n-1], y[0:m-1]) + (1 if x[-1] != y[-1] else 0)\n ed = min(ed1, ed2, ed3)\n print(\"%sEND edit_distance(\\\"%s\\\", \\\"%s\\\")\" % (indent, x, y))\n recursion_depth -= 1\n return ed", "def number_of_loc_changes(self) -> int:\n raise NotImplementedError('not implemented')", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def getOutputLength(self):\n return len(self.Y[0])", "def nr_points(self):\n return len(self.x)", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def count_liberties(self, x, y):\n return len(self.get_liberties(x, y))", "def count():", "def count(x):\n return sum(len(y) for y in x)", "def __stars__(self):\n t_ops = self.__ops__[1::2]\n return t_ops.count('x') + t_ops.count('X')", "def count_subs(x,y):\n\t# Encases diagonals in square grid of size 'square'\n\tsquare = x + y - 2\n\tsubs = 0\n\t# For every point counts the number of rectagles with (a,b) as upper left corner\n\tfor a in range(square):\n\t\tfor b in range(square):\n\t\t\tif valid(a,b,x,y):\n\t\t\t\tthis_subs = subs_at_point(a,b,x,y)\n\t\t\t\tprint \"%3d \" %(this_subs),\n\t\t\tprint \"\"\n\treturn subs", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def number_of_new_components(self):\n t_low = self.lower_binary_tree().to_tilting()\n t_up = self.upper_binary_tree().to_tilting()\n return len([p for p in t_low if p in t_up])", "def _setNumber(x, y, xsize, ysize, field):\n count = 0\n xarray = np.array([x-1, x-1, x-1, x, x, x+1, x+1, x+1])\n yarray = np.array([y-1, y, y+1, y-1, y+1, y-1, y, y+1])\n for i in range(8):\n if (xarray[i] < 0) or (yarray[i] < 0):\n pass\n elif (xarray[i] > xsize-1) or (yarray[i] > ysize-1):\n pass\n else:\n if field[xarray[i], yarray[i]] == np.inf:\n count += 1\n return count", "def n_points(self):\n\n if self.data_reduced:\n 
return len(self.data_reduced[0])\n else:\n return 0", "def compute_num_tracks(x_offset: int, y_offset: int,\n x: int, y: int, track_info: Dict[int, int]):\n x_diff = x - x_offset\n y_diff = y - y_offset\n result = 0\n for length, num_track in track_info.items():\n if x_diff % length == 0 and y_diff % length == 0:\n # it's the tile\n result += num_track\n return result", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def calc(self):\n np = 0\n for cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np", "def countPanelTrajectoryExpressions(self):\n return 1 + self.child.countPanelTrajectoryExpressions()", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def deltaCnt(self,new_cnt, past_cnt):\r\n delta = new_cnt - past_cnt\r\n if delta < -1 * (\r\n 2 ** 15): # Checks if the encoder values have rolled over, and if so, subtracts/adds accordingly to assure normal delta values\r\n delta += (2 ** 16)\r\n elif delta > (2 ** 15):\r\n delta -= (2 ** 16)\r\n old_cnt = new_cnt\r\n return delta, old_cnt", "def __len__(self):\n from math import sqrt\n\n #nicer notation to make it easier to read.\n\n a, b = self.x, self.y\n\n return int(sqrt(a**2 + b**2))", "def countDiff(self, color):\n count = 0\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n count += 1\n if self[x][y]==-color:\n count -= 1\n return count", "def n(self):\n return sum(self._comp.values())", "def numCoords(self):\n return self.nCoords", "def get_count_life_neighbor(arr, x, y, max_x, max_y):\n\tres_count = 0\n\n\tif x > 0 and y > 0:\n\t\tif arr[y-1][x-1]:\n\t\t\tres_count += 1\n\n\tif y > 0:\n\t\tif arr[y-1][x]:\n\t\t\tres_count += 1\n\n\tif y > 0 and x < max_x:\n\t\tif arr[y-1][x+1]:\n\t\t\tres_count += 1\n\n\tif x > 0:\n\t\tif arr[y][x-1]:\n\t\t\tres_count += 1;\n\n\tif x < max_x:\n\t\tif arr[y][x+1]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x > 0:\n\t\tif arr[y+1][x-1]:\n\t\t\tres_count += 1\n\n\tif y < max_y:\n\t\tif arr[y+1][x]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x < max_x:\n\t\tif arr[y+1][x+1]:\n\t\t\tres_count += 1\n\n\treturn res_count", "def countPanelTrajectoryExpressions(self):\n nbr = 0\n for e in self.children:\n nbr += e.countPanelTrajectoryExpressions()\n return nbr", "def score_(self, X, y):\n return self.calculate_truncated_energies().sum()", "def change(self):\n return _n.reshape(self.next_x - self.x, self.original_shape)", "def nNx(self):\n return self.nCx + 1", "def sample_count(self):\n assert len(self.decay_x) == len(self.decay_y)\n return len(self.decay_x)", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def numberOfPoints(self):\n return 20000", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def positions_num_buzzes_counts(buzzes_per_position,num_models):\n max_num_buzzes = num_models\n max_pos = max([len(a) for a in buzzes_per_position])\n \n\n pos_buzzes = np.zeros((max_pos,max_num_buzzes+1))\n\n for question in buzzes_per_position:\n for pos, buzzes in enumerate(question):\n pos_buzzes[pos][buzzes] += 1\n return pos_buzzes", "def get_number_operators(self) -> int:\n if self.sons:\n return 1 + sum([son.get_number_operators() for son in self.sons])\n return 0", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not 
torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def n_inputs(self):", "def _find_epochs(self, history):\n \n epoch_count = len(history.history['val_loss'])\n\n return epoch_count", "def n(self):\n return len(self.marks)", "def nr_of_bees_at(self, point):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception\n \n point = point.copy()\n \n if self.phase == 2:\n point -= self.position\n elif self.phase > 2:\n point = self.transform(point - self.position)\n\n return sum(map(lambda x: array_equal(point,x),pos))", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def grid_point_count(self):\n return pytools.product(self.grid_point_counts())", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def getNumPoints(self, l1, l2):\n n1 = self.pointcounts[l1]\n n2 = self.pointcounts[l2]\n self.pointcounts[('Cl_%d' % self.labelcount, l1, l2)] = n1 + n2\n return (n1, n2)", "def _count_discordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_discordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)", "def length(self):\n return len(self.x)", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def getNumCleanedTiles(self):\n tilesCopy = {}\n tilesCopy = self.tiles.copy()\n numCleanTiles = 0\n \n for posTupleKey, posVal in tilesCopy.items():\n if posVal == 1:\n numCleanTiles += 1\n return numCleanTiles\n #raise NotImplementedError", "def calculate_width(self):\n return self.endX - self.startX", "def num_points_in_distance(d):\n return 1 + 3 * d * (d + 1)", "def run_edits(self):\n _edit_count = 0\n for main_key in self.job[\"edits\"]:\n for _item in self.job[\"edits\"][main_key]:\n _edit_count += self.json_o.edit(main_key,\n _item, self.job[\"edits\"][main_key][_item])\n return _edit_count", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def num_correct_per_question(responses, bundle_to_ix, gold_answers):\n num_correct_guesses = []\n for question in responses:\n gold_answer = gold_answers[question[0]['question_idx']]\n num_correct_guesses.append(sum([1 if gold_answer==guess else 0 \\\n for guess in question[-1]['guesses']]))\n return num_correct_guesses", "def linecounter(x):\n return linecount(x) + longlines(x)", "def num_points_sweep(self, start, stop, step):\r\n return(abs((stop - start)//step) + 1)", "def get_hit_num(pred, y_truth):\n\n hit_num = 0\n for i in range(len(y_truth)):\n for value in y_truth[i]:\n hit_num += np.sum(pred[i] == value)\n return hit_num", "def numIncrementals(self) -> int:\n return len(self._dataArrays)", "def __len__(self):\n return sum(abs(j) for (i, j) in self)", "def 
iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0", "def __len__(self):\r\n\r\n return self.yInput.shape[1]", "def _calculate_bit_size(self, history: sizing_executor.SizeAndDTypes) -> int:\n bit_size = 0\n for num_elements, dtype in history:\n bit_size += num_elements * self._bits_per_element(dtype)\n return bit_size", "def get_number_of_evaluation(self):\n return self.n_eval", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')", "def n_ins(self):\n pass", "def num_inducing(self) -> tf.Tensor:\n raise NotImplementedError", "def ydim(self):\n return len(self._y)", "def calibration(self) -> int:", "def _ss_tot(self):\n squares = np.square(self.y - np.expand_dims(self._ybar, axis=-2))\n if self.w is None:\n return np.sum(squares, axis=-2)\n else:\n return np.sum(np.matmul(self.w_diag, squares), axis=-2)", "def test_expand_counts(self):\n c = array([2,0,1,2])\n self.assertEqual(expand_counts(c), array([0,0,2,3,3]))", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def epoch(self):\n return len(self.history)", "def count(self) -> int:\n return self.end_measure_num - self.start_measure_num + 1", "def calculate_accuracy(mod_pred, y):\n count = 0\n for i, y_true in enumerate(y):\n if y_true == mod_pred[i]:\n count += 1\n\n return count/len(y)", "def num_conll(self):\n pass", "def how_many(e, x):\n return count(np.asarray(x) == e)", "def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)", "def __len__(self):\n return len(self.__squares) * len(self.__squares[0])", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def vertex_num(self, x, y):\n width, _ = self.size\n return 1 + (width*y) + x", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def final_frequency(changes: Sequence[int]) -> int:\n return sum(changes)", "def nPoints(self):\n return self._c_param.shrake_rupley_n_points", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def board_change(self, dot_distance):\n global num_dots\n (width, height) = self.calculate_size(num_dots)\n return dot_distance, height, width", "def ipset_num_x_y_different():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 3), x_new=np.linspace(2, 5, 4))", "def _count_concordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_concordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)", "def calc_diffs(i, j, correct_x, correct_y):\n return abs(i - correct_x) + abs(j - correct_y)", "def nspatials(self):\n return int(len(self)/2)" ]
[ "0.6166617", "0.591628", "0.58021086", "0.57318366", "0.5673574", "0.5647557", "0.5594943", "0.5514994", "0.550421", "0.5470522", "0.54313797", "0.54289174", "0.542215", "0.53802377", "0.5366139", "0.5360295", "0.53558695", "0.53518134", "0.5347793", "0.53407663", "0.5332358", "0.53095835", "0.5308614", "0.53023684", "0.52960336", "0.529324", "0.526216", "0.52417165", "0.5241245", "0.5231755", "0.5222951", "0.52213174", "0.52006936", "0.519411", "0.5186669", "0.51853925", "0.51805353", "0.5177607", "0.51679873", "0.51642185", "0.5160571", "0.51600903", "0.51581794", "0.5152922", "0.51477635", "0.51450205", "0.51391596", "0.5137558", "0.5128115", "0.5122492", "0.51099646", "0.51099646", "0.5107099", "0.5103248", "0.5099385", "0.50988734", "0.50957227", "0.50926787", "0.5088854", "0.5078023", "0.5072961", "0.50726324", "0.5070563", "0.50681937", "0.50665635", "0.50646", "0.50571513", "0.5027427", "0.5024409", "0.5009537", "0.5009373", "0.50075096", "0.5006749", "0.4995143", "0.49951017", "0.49948266", "0.49933627", "0.49871317", "0.49867877", "0.49763373", "0.49760038", "0.49736023", "0.49736023", "0.4972189", "0.49667943", "0.4965877", "0.49644366", "0.49626288", "0.496197", "0.49589106", "0.49578083", "0.4956079", "0.49559537", "0.49557847", "0.49422568", "0.49420282", "0.4941953", "0.49399373", "0.49369472", "0.49368745", "0.4936736" ]
0.0
-1
Return a dataframe of the distance matrix of x and y. Indexes are the letters of x and columns are the letters of y.
def distance_dataframe(self, x, y, keyboard_weight=None):
    dist_matrix = self.distance_matrix(x, y, keyboard_weight)
    dist_df = pd.DataFrame(dist_matrix, index=["", *list(x)], columns=["", *list(y)])
    return dist_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_dist_matrix(self):\n\n self.dist_matrix = spatial.distance.squareform(spatial.distance.pdist(self.data_vector,metric=\"hamming\"))\n\n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)", "def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))", "def get_distance_matrix(self):\n names = self.get_named_leaves()\n num_names = len(names)\n dist_mat = np.zeros((num_names, num_names), dtype='float')\n for i, j in itertools.combinations(range(num_names), 2):\n node1, node2 = self.node_names[names[i]], self.node_names[names[j]]\n dist = self.node_distance(node1, node2)\n dist_mat[i,j] = dist\n dist_mat[j,i] = dist\n return names, dist_mat", "def get_distance_matrix():\n df_afstandn2 = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_poi_afstand\n WHERE afstand < 1000\n \"\"\")\n return df_afstandn2", "def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix", "def get_distance_matrix(df, distance_measure, feat_col_ix=1):\n n = len(df)\n dist_matrix = np.zeros((n,n))\n for i in range(n):\n for j in range(j):\n si = df.iloc[i, feat_col_ix:]\n sj = df.iloc[j, feat_col_ix:]\n dist_matrix[i,j] = distance_measure(si, sj)[0]\n return dist_matrix", "def build_distance_matrix(path_to_embeddings):\n\n embed_df = pd.read_csv(path_to_embeddings)\n print (\"length is: \", len(embed_df))\n columns = list(embed_df)\n\n \n distances = 
euclidean_distances(embed_df.iloc[:, 1:], embed_df.iloc[:, 1:])\n embed_df = embed_df.set_index([columns[0]])\n # format distance matrix\n distances_df = pd.DataFrame(distances)\n distances_df.columns = list(embed_df.index)\n distances_df.index = list(embed_df.index)\n\n print (\"finished building the distance matrix ...\")\n\n print (\"///////////////////\")\n print (len(distances_df))\n\n return distances_df", "def calculateDistances(df):\n return", "def euclidean_distances(X, Y):\r\n\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sqrt(np.sum((X[X_idx,:]-Y[Y_idx,:])**2))\r\n \r\n return D", "def compute_dist_matrix(X1, X2, distance):\n N, M = X1.shape[0], X2.shape[0]\n dist_matrix = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n dist_matrix[i][j] = dist(X1[i], X2[j], distance=distance)\n return dist_matrix", "def pairwise_euclidean_distance(x, y):\n m, n = x.size(0), y.size(0)\n dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() \\\n - 2 * torch.matmul(x, y.t())\n # for numerical stability\n dist_mat = dist_mat.clamp(min=1e-12).sqrt()\n return dist_mat", "def calc_dist_matrix(self,verbose=False):\n\n print(\"Calculating distance matrix.\"); sys.stdout.flush()\n\n nrow = self.data_vector.shape[0]\n self.dist_matrix = np.zeros((nrow, nrow),dtype=float)\n for i in range(nrow):\n if verbose:\n if i % 1000 == 0:\n print(\"Row\",i,\"of\",nrow)\n sys.stdout.flush()\n\n for j in range(i + 1, nrow):\n self.dist_matrix[i,j] = self._pairwise_dist(self.data_vector[i],self.data_vector[j])\n self.dist_matrix[j,i] = self.dist_matrix[i,j]\n \n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix", "def compute_euclidean_distance_matrix(locations):\n distances = {}\n distances_df=get_times(locations)\n print(distances_df)\n print(distances_df.iloc[0,0])\n print(distances_df.iloc[0,1])\n print(distances_df.iloc[0,2])\n for from_counter, from_node in enumerate(locations):\n distances[from_counter] = {}\n for to_counter, to_node in enumerate(locations):\n distances[from_counter][to_counter] = (int(\n distances_df.iloc[from_counter,to_counter]))\n return distances", "def get_euclidean_matrix(df):\n df.reset_index(drop=True, inplace=True)\n\n # foods = df['food_names']\n # food_examples = []\n # indices = list(range(0, len(foods)))\n # for i in indices:\n # food_examples.append(str(foods[i]) + str(i))\n # food_examples = pd.Series(food_examples)\n food_examples = df['food_names']\n\n df = df.drop(['food_names', 'height', 'weight', 'above_range', 'BMI', 'age', 'gender',\n 'glucose_tolerance_category','90-percentile_of_2h-iAUC', 'average_carbs_ratio',\n 'average_daily_carbs','average_meals_per_day', 'average_sleep_hours',\n 'average_glucose', 'baseline', 'coefficient_of_variation', 'max_2-hours_iAUC',\n 'median_fasting_glucose_level','median_of_2h-iAUC', 'night_baseline'], axis='columns')\n\n df = df.replace([-np.inf], 0).dropna(axis=1)\n\n num_examples = df.shape[0]\n\n distances = pdist(df.values, metric='euclidean')\n print(distance)\n dis_array = squareform(distances)\n 
print(dis_array)\n dis_df = pd.DataFrame(data = dis_array, index=food_examples, columns=food_examples)\n print(dis_df)\n writer = pd.ExcelWriter('Euclidean_distance_icarbonx.xlsx', engine='xlsxwriter')\n dis_df.to_excel(writer, sheet_name='Sheet1')\n writer.save()", "def distance_matrix(self, x, y, keyboard_weight=None):\r\n # create distance matrix\r\n size_x = len(x) + 1\r\n size_y = len(y) + 1\r\n dist_matrix = np.zeros((size_x, size_y))\r\n for i in range(size_x):\r\n dist_matrix[i, 0] = i\r\n for j in range(size_y):\r\n dist_matrix[0, j] = j\r\n\r\n ## fill distance matrix\r\n # no keyboard weight\r\n if not keyboard_weight:\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n subs = dist_matrix[i-1, j-1] + 1\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # manhattan keyboard weight\r\n elif keyboard_weight == \"manhattan\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.manhattan_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # euclidean keyboard weight\r\n elif keyboard_weight == \"euclidean\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.euclidean_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n \r\n return dist_matrix", "def distance_matrix(X, Y, metric):\n distance = np.zeros((len(X), len(Y)))\n for i in range(len(X)):\n for j in range(len(Y)):\n m = metric(X[i], Y[j])\n if np.isnan(m):\n pdb.set_trace()\n distance[i, j] = m\n return distance", "def create_cols_distances(df):\n #create a column for haversine distance\n df['distance'] = haversine_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['manhattan_distance'] = dummy_manhattan_distance(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['bearing'] = bearing_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n return df", "def get_distance_matrix(grouped_distance):\n return grouped_distance.groupby(\n F.col(\n \"category_a\"\n ).alias(\n \"category\"\n )\n ).pivot(\n \"category_b\"\n ).agg(\n F.expr(\n \"coalesce(min(distance), 10000.00)\"\n )\n ).orderBy(\n \"category\"\n )", "def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists", "def compute_distance(df):\n pass", "def 
get_matches_df(sparse_matrix, name_vector):\n\n name_vector_list = pd.Series(list(map(str, name_vector)))\n\n non_zeros = sparse_matrix.nonzero()\n\n sparserows = non_zeros[0]\n sparsecols = non_zeros[1]\n\n nr_matches = sparsecols.size\n\n left_side = np.empty([nr_matches], dtype=object)\n right_side = np.empty([nr_matches], dtype=object)\n similarity = np.zeros(nr_matches)\n pos_left = np.zeros(nr_matches, dtype=np.int)\n pos_right = np.zeros(nr_matches, dtype=np.int)\n\n for index in range(0, nr_matches):\n left_side[index] = name_vector_list[sparserows[index]]\n right_side[index] = name_vector_list[sparsecols[index]]\n similarity[index] = sparse_matrix.data[index]\n pos_left[index] = sparserows[index]\n pos_right[index] = sparsecols[index]\n\n return pd.DataFrame({'left_side': left_side,\n 'right_side': right_side,\n 'similarity': similarity,\n 'pos_left': pos_left,\n 'pos_right': pos_right})", "def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)", "def cosine_distances(X, Y):\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = 1 - (np.dot(X[X_idx,:],Y[Y_idx,:]) / (np.sqrt(np.dot(X[X_idx,:], X[X_idx,:]))* np.sqrt(np.dot(Y[Y_idx,:], Y[Y_idx,:])))) \r\n return D", "def _compute_pairwise_distance(\n x: np.ndarray, y: np.ndarray, symmetric: bool, distance_callable: DistanceCallable\n) -> np.ndarray:\n _x = _make_3d_series(x)\n _y = _make_3d_series(y)\n x_size = _x.shape[0]\n y_size = _y.shape[0]\n\n pairwise_matrix = np.zeros((x_size, y_size))\n\n for i in range(x_size):\n curr_x = _x[i]\n for j in range(y_size):\n if symmetric and j < i:\n pairwise_matrix[i, j] = pairwise_matrix[j, i]\n else:\n pairwise_matrix[i, j] = distance_callable(curr_x, _y[j])\n return pairwise_matrix", "def distance_matrix(dnas: Collection[str], metric=hamming_distance, relative=True, as_ndarray=False):\n n = len(dnas)\n result = [[0] * n for _ in range(n)]\n for pair in itertools.combinations(zip(range(n), dnas), r=2):\n (idx1, dna1), (idx2, dna2) = pair\n distance = metric(dna1, dna2)\n distance = distance / max(len(dna1), len(dna2)) if relative else distance\n result[idx1][idx2] = distance\n result[idx2][idx1] = distance\n if as_ndarray:\n result = np.asarray(result)\n return result", "def get_distance_matrix(self, points):\n return points[:, :, np.newaxis, :]-points[:, np.newaxis, :, :]", "def distancematrix(vec1, vec2):\n v1, v2 = np.meshgrid(vec1, vec2)\n return np.abs(v1 - v2)", "def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix", "def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix", "def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return 
mat", "def getDistance(X1, X2):\n distance = 0 \n length = len(X1)\n for i in range(length):\n distance += (X1[i] - X2[i])**2 # differences of the columns squared\n \n distance = math.sqrt(distance)\n\n return distance", "def sax_table_dist(self,other, alphabet):\n distance_matrix = np.array([[0],[0]])\n distance_matrix = alphabet.get_distance_matrix(other.cardinality)\n return distance_matrix[self.sax_character][other.sax_character]", "def getDistanceMatrix(self):\n return self.distmat.as_matrix()", "def get_distance_matrix(visits: List[str], distances: Dict[Tuple[str, str], float]) -> List[List[float]]:\n\n return [[distances[i,j] for j in visits] for i in visits]", "def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ", "def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists", "def get_pca_distances(components_df):\n d = pd.DataFrame()\n for name_i, srs_i in components_df.T.iteritems():\n \n for name_j, srs_j in components_df.T.iteritems():\n d.loc[name_i,name_j] = np.linalg.norm(srs_i-srs_j)\n return d", "def compute_distances_matrix(positions, max_distance, pixel_size=None):\n module_logger.info('Computing distances between spots')\n\n if len(positions) < 2:\n raise Exception('Not enough dimensions to do a distance measurement')\n\n channel_permutations = list(permutations(range(len(positions)), 2))\n\n if not pixel_size: # TODO: make sure the units are corrected if no pixel size\n pixel_size = np.array((1, 1, 1))\n module_logger.warning('No pixel size specified. 
Using the unit')\n else:\n pixel_size = np.array(pixel_size)\n\n for a, b in channel_permutations:\n distances_matrix = cdist(positions[a], positions[b], w=pixel_size)\n\n distances_df = DataFrame()\n\n for i, (pos_A, d) in enumerate(zip(positions[a], distances_matrix)):\n if d.min() < max_distance:\n distances_df = distances_df.append({\"channel_a\": a,\n \"channel_b\": b,\n \"z_coord_a\": pos_A[0],\n \"y_coord_a\": pos_A[1],\n \"x_coord_a\": pos_A[2],\n \"z_coord_b\": positions[b][d.argmin()][0],\n \"y_coord_b\": positions[b][d.argmin()][1],\n \"x_coord_b\": positions[b][d.argmin()][2],\n \"z_dist\": pos_A[0] - positions[b][d.argmin()][0],\n \"y_dist\": pos_A[1] - positions[b][d.argmin()][1],\n \"x_dist\": pos_A[2] - positions[b][d.argmin()][2],\n 'dist_3d': d.min(),\n \"labels_a\": i,\n \"labels_b\": d.argmin()\n }, ignore_index=True\n )\n\n return distances_df", "def format_distance_matrix(labels, data):\r\n return format_matrix(data, labels, labels)", "def create_dist_matrix(matrix):\n #Convert input data matrix to numpy matrix\n matrix = np.array(matrix)\n n = matrix.shape[0]\n \n #Iterate through number of samples to create distance matrix\n for i in range(n):\n dist_array = euclidean_distance(matrix[i,:], matrix)\n if i == 0:\n dist_matrix = dist_array\n else:\n dist_matrix = np.concatenate((dist_matrix, dist_array), axis = 1)\n return dist_matrix", "def compute_distance_matrix_from_metadata(column_data):\r\n data_row = array(column_data)\r\n data_col = reshape(data_row, (1, len(data_row)))\r\n dist_mtx = abs(data_row - data_col.T)\r\n\r\n return dist_mtx", "def distance_matrix(cities):\n\n return [[city1.distance(city2) for city2 in cities]\n for city1 in cities]", "def add_distance_features(df_kek):\n df = pd.DataFrame([])\n df['distance'] = get_distance_vector(df_kek, 'latitude', 'longitude', 'del_latitude', 'del_longitude')\n df['distance_dest_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n 'del_latitude', 'del_longitude')\n df['distance_start_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n 'latitude', 'longitude')\n df['route_distance'] = df_kek.apply(lambda x: get_route_distance(x['route']), axis=1)\n df[df['route_distance'] == 0.0] = df['route_distance'].median()\n df = pd.concat([df, pd.get_dummies(df_kek['main_id_locality'], prefix='City')], axis=1)\n return df", "def get_cosine_similarity_df(word2vec: Word2Vec) -> pd.DataFrame:\n sim = get_cosine_similarity(word2vec)\n return pd.DataFrame(sim, index=word2vec.wv.index2word, columns=word2vec.wv.index2word)", "def manhattan_distances(X, Y):\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sum(np.abs(X[X_idx,:] - Y[Y_idx,:]))\r\n \r\n return D", "def cross_distances(df_row: pd.DataFrame, df_col: pd.DataFrame, distance_function=None):\n if distance_function is None:\n disf = lambda x1, x2: (np.sqrt(np.sum(np.power(x2-x2, 2))))\n else:\n disf = distance_function\n dist = pd.DataFrame(index=df_row.index)\n for colname, cvals in df_col.iterrows():\n dist[colname] = df_row.apply(lambda x: disf(x, cvals.values), axis='columns')\n return dist", "def getDistanceMatrix(self):\n v = self.getVectors()\n vLis = v.keys()\n N = len(v.keys())\n D = np.zeros([N, N], dtype=np.float32)\n print(N)\n for i in range(N):\n print(\"%d/%d\" %(i, N))\n D[i, i] = 1\n for j in range(i + 1, N):\n dist = self.cosin_sim_pairs(v[vLis[i]], v[vLis[j]])\n D[i, j] = dist\n D[j, i] = dist\n 
return D", "def dist_matrix(self, group1, group2):\n \n tmps = []\n for i in group2:\n tmps.append([])\n for j in group1:\n mi, label = self.distance(i, j)\n tmps[-1].append(mi)\n return tmps", "def compute_distances(self):\n if self.df is None:\n return\n\n self.origdist = []\n self.transdist = []\n for i in range(len(self.df)):\n for j in range(i+1, len(self.df)):\n self.origdist.append(distance(self.df['LPsol'].iloc[i], self.df['LPsol'].iloc[j]))\n self.transdist.append(distance(self.df[['x', 'y']].iloc[i], self.df[['x', 'y']].iloc[j]))", "def pairwise_distances(data):\n distances = []\n for x in data:\n distances_row = []\n for y in data:\n distances_row.append(metric(x, y)**2)\n distances.append(distances_row)\n return distances", "def sere_matrix(df, TH=1):\n number_samples = df.shape[1]\n columns = df.columns\n\n # Distance matrix\n distance = np.full((num_samples, num_samples), np.nan)\n for i in range(num_samples):\n for j in range(i, num_samples):\n distance.iloc[i, j] = sere_score(df.loc[:, [i, j]], TH)\n distance.iloc[j, i] = distance.iloc[i, j]\n\n return pd.DataFrame(distance, index=columns, columns=columns)", "def find_distances(frame, newPoint): \n distances = [] \n \n # iterate over all rows in the dataframe\n for index in range(frame.shape[0]):\n\n # get all columns of a row (except the label) \n point = frame.iloc[index,:-1] \n\n \t# compute the distance, then save distance and label \n # (use distance as first value)\n distance = euclidean_distance(point, newPoint)\n if distance == 0:\n distances.append((distance, frame.iloc[index,-1])) \n else:\n distances.append((sys.maxsize, frame.iloc[index,-1])) \n \n\n distances.sort() \n \n return distances", "def _compute_pairwise_distance(self, column: List[List[Token]]) -> np.array:\n pairwise = NeedlemanWunschAligner()\n l = len(column)\n distances = np.empty((l, l))\n for u in range(l):\n # compute only half of the distances\n for v in range(u, l):\n au, av = pairwise.align([column[u], column[v]]) # get aligned\n distances[u][v] = distances[v][u] = self.distance.compute(au, av)\n\n return distances", "def compute_distances(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n \n sum_test_square = np.sum(np.square(X), axis=1).reshape(-1, 1)\n sum_train_square = np.sum(np.square(self.X_train), axis=1).reshape(-1, 1)\n product_test_train = X @ self.X_train.T\n \n sum_test_square = np.repeat(sum_test_square, num_train, axis=1)\n sum_train_square = np.repeat(sum_train_square, num_test, axis=1).T\n \n dists_square = sum_test_square - 2 * product_test_train + sum_train_square\n \n dists = np.sqrt(dists_square)\n \n return dists", "def distance(self, x, y, keyboard_weight=None):\r\n dist_matrix = self.distance_matrix(x, y, keyboard_weight)\r\n return dist_matrix[-1, -1]", "def get_dist_mat(self):\n n_site = self.status.give(keyword=\"n_site\")\n sites = self.status.give(keyword=\"sites\")\n dist_mat = [[0.0 for j in xrange(n_site)] for i in xrange(n_site)]\n for i in xrange(n_site):\n for j in xrange(n_site):\n ri = sites[i].pos\n rj = sites[j].pos\n dist_mat[i][j] = np.linalg.norm(ri-rj)\n # print ri, rj\n return dist_mat", "def compute_distances_two_loops(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n for j in range(num_train):\n dists[i, j] = np.sqrt(np.sum(np.square(X[i] - self.X_train[j])))\n return dists", "def compute_distance(self, transpose=False):\n\n # Calculate 
distance matrix\n if transpose:\n distance_matrix = pdist(self.matrix.T, self.distance)\n else:\n distance_matrix = pdist(self.matrix, self.distance)\n\n # Remove NaNs\n distance_matrix[np.isnan(distance_matrix)] = 1.0\n\n return distance_matrix", "def distance(self, method=\"euclidean\", **kwargs):\n return Adjacency(\n pairwise_distances(self, metric=method, **kwargs), matrix_type=\"Distance\"\n )", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix", "def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance", "def coords_to_df(coords, columns=None):\n nb_dim = coords.shape[1]\n if columns is None:\n if nb_dim == 2:\n columns = ['x', 'y']\n elif nb_dim == 3:\n columns = ['x', 'y', 'z']\n else:\n columns = ['x'+str(i) for i in range(nb_dim)]\n \n nodes = pd.DataFrame(data=coords, columns=columns)\n return nodes", "def flatten_distance_matrix(dist):\n inds = np.triu_indices(dist.shape[0])\n return dist[inds]", "def compute(self,x,y):\n\n if(self.npaMatrix == None):\n raise Exception(\"\".join([\"MLPYDistanceAdaptor. Attempted to compute distance with out a distance matrix passed in during construction.\"]))\n return self.npaMatrix[x[0],y[0]]", "def compute_l2_distance_matrix(features_queries, features_dataset):\n sx = np.sum(features_queries ** 2, axis=1, keepdims=True)\n sy = np.sum(features_dataset ** 2, axis=1, keepdims=True)\n\n return np.sqrt(-2 * features_queries.dot(features_dataset.T) + sx + sy.T)", "def point_distances(src_points, gt_points):\n distances = EuclideanDistances(np.matrix(src_points), np.matrix(gt_points))\n return np.array(distances)", "def matrix2dataframe(keywords: List[str],\n matrix: np.ndarray,\n path: str = DISTANCE_XLSX\n ) -> pd.DataFrame:\n sem_dist_df = pd.DataFrame(\n data=matrix, index=keywords, columns=keywords\n )\n sem_dist_df.to_excel(path, index_label='Keywords')\n return sem_dist_df", "def distanceXandY(X,Y):\r\n distanceXY=[0]*len(X)\r\n hour=1\r\n while hour<(len(X)):\r\n distanceXY[hour]=math.sqrt((X[hour]-X[hour-1])**2 + (Y[hour]-Y[hour-1])**2)\r\n hour+=1\r\n return distanceXY", "def calculate_distance(df1, df2):\n my_bar = st.progress(0)\n total_length = df1.shape[0] * df2.shape[0]\n incr = 0.0\n output = {}\n for ind in df1.index:\n output[ind] = {}\n for ind2 in df2.index:\n output[ind][ind2] = cosine(df1.loc[ind], df2.loc[ind2])\n incr += 1.0\n my_bar.progress(incr / total_length)\n\n return pd.DataFrame(output).dropna(how=\"all\").dropna(how=\"all\", axis=1)", "def dist_matrix(self):\n return self.__dist_matrix", "def _freespace_matrix(distance):\n\n return np.array([[1., distance], [0., 1.]])", "def get_semantic_dist_matrix(target_words: List[str],\n word2onehot: Dict[str, int], \n matrices: Matrices\n ) -> np.ndarray:\n n = len(target_words)\n distance_matrix = np.zeros(shape=(n, n))\n for i in range(n):\n for j in range(i, n):\n vec1 = 
matrices.embedding[word2onehot[target_words[i]]]\n vec2 = matrices.embedding[word2onehot[target_words[j]]]\n distance = cosine_sim(\n vec1, vec2\n )\n distance_matrix[i][j] = distance\n distance_matrix[j][i] = distance\n return distance_matrix", "def get_alphabet_similarity_matrix(self):\n distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))\n numpy.fill_diagonal(distance_matrix, 0)\n for index_one, descriptor_one in enumerate(self.descriptors):\n for index_two, descriptor_two in enumerate(self.descriptors):\n distance = descriptor_one - descriptor_two\n squared_distance = numpy.dot(distance, distance)\n distance_matrix[index_one, index_two] = squared_distance\n distance_matrix /= 2. * (self.sigma_amino_acid ** 2)\n return numpy.exp(-distance_matrix)", "def vec2adjmat(source, target, symmetric=True):\n df = pd.DataFrame(np.c_[source, target], columns=['source', 'target'])\n # Make adjacency matrix\n adjmat = pd.crosstab(df['source'], df['target'])\n # Get all unique nodes\n # nodes = np.unique(np.c_[adjmat.columns.values, adjmat.index.values].flatten())\n nodes = np.unique(list(adjmat.columns.values) + list(adjmat.index.values))\n\n # Make the adjacency matrix symmetric\n if symmetric:\n # Add missing columns\n node_columns = np.setdiff1d(nodes, adjmat.columns.values)\n for node in node_columns:\n adjmat[node]=0\n\n # Add missing rows\n node_rows = np.setdiff1d(nodes, adjmat.index.values)\n adjmat=adjmat.T\n for node in node_rows:\n adjmat[node]=0\n adjmat=adjmat.T\n\n # Sort to make ordering of columns and rows similar\n [IA, IB] = ismember(adjmat.columns.values, adjmat.index.values)\n adjmat = adjmat.iloc[IB, :]\n adjmat.index.name='source'\n adjmat.columns.name='target'\n\n return(adjmat)", "def vec2adjmat(source, target, symmetric=True):\n df = pd.DataFrame(np.c_[source, target], columns=['source', 'target'])\n # Make adjacency matrix\n adjmat = pd.crosstab(df['source'], df['target'])\n # Get all unique nodes\n # nodes = np.unique(np.c_[adjmat.columns.values, adjmat.index.values].flatten())\n nodes = np.unique(list(adjmat.columns.values) + list(adjmat.index.values))\n\n # Make the adjacency matrix symmetric\n if symmetric:\n # Add missing columns\n node_columns = np.setdiff1d(nodes, adjmat.columns.values)\n for node in node_columns:\n adjmat[node]=0\n\n # Add missing rows\n node_rows = np.setdiff1d(nodes, adjmat.index.values)\n adjmat=adjmat.T\n for node in node_rows:\n adjmat[node]=0\n adjmat=adjmat.T\n\n # Sort to make ordering of columns and rows similar\n [IA, IB] = ismember(adjmat.columns.values, adjmat.index.values)\n adjmat = adjmat.iloc[IB, :]\n adjmat.index.name='source'\n adjmat.columns.name='target'\n\n return(adjmat)", "def from_dataframe(df):\n X = sm.add_constant(np.array(df['x']))\n y = np.array(df['y']).reshape(-1,1)\n return y, X", "def _generate_distance_kernel_matrix(self):\n with self._rw_lock.read_lock():\n # Create matrix whose elements are the distances between all row\n # permutations\n fmat = self._feature_mat # shorter name\n num_rows = fmat.shape[0]\n\n # distance kernel is a square matrix based on feature samples\n dist_kernel = np.mat(np.ndarray((num_rows,)*2))\n self._log.info(\"Creating distance kernel with shape %s\",\n dist_kernel.shape)\n\n timer_log = logging.getLogger('.'.join((self.__module__,\n self.__class__.__name__,\n \"SimpleTimer\")))\n\n for i in xrange(num_rows - 1):\n with SimpleTimer('computing distances from row %d to [%d-%d]'\n % (i, i+1, num_rows-1), timer_log):\n dist_kernel[i, i] = 1.0\n for j in xrange(i + 1, 
num_rows):\n dist = self._histogram_intersection_distance(fmat[i],\n fmat[j])\n dist_kernel[i, j] = dist_kernel[j, i] = dist\n dist_kernel[-1, -1] = 1.0\n return dist_kernel", "def create_similarity_matrix_euclid(matrix):\n similarity_matrix_euclid = pd.DataFrame(0, index=matrix.index, columns=matrix.index, dtype=float)\n\n for business1 in matrix.index:\n for business2 in matrix.index:\n similarity_matrix_euclid[business1][business2] = similarity_euclid(matrix, business1, business2)\n\n return similarity_matrix_euclid", "def matrix_dist(self):\n matrix_dic = {}\n for clus in self.clusters:\n for other_clus in self.clusters:\n if clus.samples[0].s_id > other_clus.samples[0].s_id: # avoid duplicates\n matrix_dic[(clus.samples[0].s_id, other_clus.samples[0].s_id)] = clus.samples[0]\\\n .compute_euclidean_distance(other_clus.samples[0])\n return matrix_dic", "def DTWDistance(s1, s2):\n len_s1 = len(s1)\n len_s2 = len(s2)\n\n _dtw_mat = np.empty([len_s1, len_s2])\n _dtw_mat[0, 0] = abs(s1[0] - s2[0])\n\n # two special cases : filling first row and columns\n\n for j in range(1, len_s2):\n dist = abs(s1[0] - s2[j])\n _dtw_mat[0, j] = dist + _dtw_mat[0, j - 1]\n\n for i in range(1, len_s1):\n dist = abs(s1[i] - s2[0])\n _dtw_mat[i, 0] = dist + _dtw_mat[i - 1, 0]\n\n #  filling the matrix\n for i in range(1, len_s1):\n for j in range(1, len_s2):\n dist = abs(s1[i] - s2[j])\n _dtw_mat[(i, j)] = dist + min(\n _dtw_mat[i - 1, j], _dtw_mat[i, j - 1], _dtw_mat[i - 1, j - 1]\n )\n\n return _dtw_mat[len_s1 - 1, len_s2 - 1]", "def get_neighbor_distances_data_frame(\n cell_structure: pmg.Structure, r: float\n) -> pd.DataFrame:\n all_neighbors: AllNeighborDistances = cell_structure.get_all_neighbors(\n r=r, include_index=True\n )\n\n neighbor_distances: NeighborDistances = extract_neighbor_distance_data(\n cell_structure=cell_structure, all_neighbors=all_neighbors\n )\n\n return pd.DataFrame(data=neighbor_distances)", "def read_in_distance_matrix(path: str = DISTANCE_XLSX\n ) -> pd.DataFrame:\n dist_matrix = pd.read_excel(path, index_col=0)\n return dist_matrix", "def TransformDistance(*args, **kwargs):\n return _gdi_.GraphicsMatrix_TransformDistance(*args, **kwargs)", "def _calculate_similarities(self) -> pd.DataFrame:\n\n df_encoded_articles = self._db_connection.get_dataframe(\n table_name='tfidf_representation',\n schema='encoded_articles'\n ).set_index('id')\n\n # Pandas loads the array column 'encoded' as a string e.g. 
\"[0.0, 0.6, 0.8]\" which needs translating to an array\n encoded_representations = np.array(df_encoded_articles['encoded'].tolist())\n\n return pd.DataFrame(\n index=df_encoded_articles.index,\n columns=df_encoded_articles.index,\n data=pairwise.cosine_similarity(encoded_representations)\n )", "def euclidean_distance(data1, data2):\n #Convert data into numpy array\n array1 = np.array(data1)\n array2 = np.array(data2)\n \n #Create distance array\n dist_array = np.sqrt(np.sum((array2-array1)**2, axis=1))\n \n #Reshape array before return results\n return np.reshape(dist_array, [len(dist_array),1])", "def embedding_distance_bulk(\n embeddings1: Embedding,\n embeddings2: Embedding,\n distance_metric: DistanceMetric) -> np.ndarray:\n if distance_metric == DistanceMetric.EUCLIDEAN_SQUARED:\n return np.square(\n paired_distances(\n embeddings1,\n embeddings2,\n metric='euclidean'))\n elif distance_metric == DistanceMetric.ANGULAR_DISTANCE:\n # Angular Distance: https://en.wikipedia.org/wiki/Cosine_similarity\n similarity = 1 - paired_distances(\n embeddings1,\n embeddings2,\n metric='cosine')\n return np.arccos(similarity) / math.pi", "def calculate_all_distances_to_center(self):\n all_distances = pd.DataFrame()\n for label in np.unique(self.embedding_df['cluster']): \n distance_df = self.calculate_distances_for_cluster(label)\n all_distances = pd.concat([all_distances, distance_df])\n \n self.embedding_df = self.embedding_df.merge(all_distances, left_index=True, right_index=True)", "def getDistanceM(self, test, train):\n p = 2 # TUNE currently euclidian distance\n distanceM = pd.DataFrame(index=test.index.values, columns=train.index.values)\n for testrow, testing in test.iterrows():\n for trainrow, training in train.iterrows():\n tot = 0\n for indexc, column in test.iteritems():\n #print(indexc)\n if indexc in self.discrete: # need to reference VDM\n datapoint = self.VDMdict.get(indexc)\n dif = datapoint[testing[indexc]][training[indexc]]\n elif indexc != \"class\": #get distance beween 2 points\n dif = abs(float(testing[indexc]) - float(training[indexc]))\n\n tot += dif ** p\n distance = tot ** (1 / p) #distance is calculated\n distanceM.at[testrow, trainrow] = distance #put in distance matrix\n return(distanceM)", "def euclidean_distance_2d(XA: np.ndarray, XB: np.ndarray):\n out = np.empty((XA.shape[0], XB.shape[0]), dtype=XA.dtype)\n for i in numba.prange(XA.shape[0]):\n for j in range(XB.shape[0]):\n out[i, j] = np.sqrt((XA[i, 0] - XB[j, 0]) ** 2 + (XA[i, 1] - XB[j, 1]) ** 2)\n return out", "def decompress(matrix_y, matrix_w):\n rows, cols = len(matrix_y[0]), len(matrix_y)\n result_array = np.zeros((rows, cols))\n for i in range(rows):\n for k in range(cols):\n result_array[i] = result_array[i] + matrix_w[k] * matrix_y[k][i]\n return DataFrame(result_array)", "def distance(x, y):\n dist = []\n for i in range(0, len(x)):\n dist.append(abs(x[i]-y[i]))\n return dist", "def distance_coord(df):\n temp_list_distance=[]\n list_distance=[]\n for i in range(len(df)-1):\n coord1 = (df['lat'][i], df['lon'][i])\n coord2 = (df['lat'][i+1], df['lon'][i+1])\n dist = geopy.distance.geodesic(coord1, coord2).km\n temp_list_distance.append(dist)\n list_distance.append(sum(temp_list_distance)) \n return(list_distance)", "def pairwise_euclidean_distance(x, y):\n x_norm = (x ** 2).sum(1).view(-1, 1)\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y ** 2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n\n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n 
# Ensure diagonal is zero if x=y\n # if y is None:\n # dist = dist - torch.diag(dist.diag)\n return torch.sqrt(torch.clamp(dist, 0.0, np.inf))", "def distance(x, y):\n dist = [pow((x-y), 2) for x, y in zip(x,y)]\n dist = math.sqrt(sum(dist))\n \n return dist", "def distances(a, b):\n # generating matrix\n matrix = [[(0, None) for x in range(len(b) + 1)] for y in range(len(a) + 1)]\n\n # base case\n for i in range(1, len(a) + 1):\n matrix[i][0] = (i, Operation.DELETED)\n for j in range(1, len(b) + 1):\n matrix[0][j] = (j, Operation.INSERTED)\n\n # fill in matrix with tuples (cost, operation)\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n # edit distance algorithm\n # costs for deletion, insertion and substitution\n delete_cost = matrix[i - 1][j][0] + 1\n insert_cost = matrix[i][j - 1][0] + 1\n substitute_cost = matrix[i - 1][j - 1][0]\n if a[i - 1] != b[j - 1]:\n substitute_cost += 1\n\n # edit distance is min cost of deletion, insertion, substitution\n if delete_cost < insert_cost and delete_cost < substitute_cost:\n matrix[i][j] = (delete_cost, Operation.DELETED)\n elif insert_cost < substitute_cost:\n matrix[i][j] = (insert_cost, Operation.INSERTED)\n else:\n matrix[i][j] = (substitute_cost, Operation.SUBSTITUTED)\n\n return matrix", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n 
min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)" ]
[ "0.69271284", "0.67564255", "0.6533258", "0.6516786", "0.6428106", "0.63869244", "0.6351963", "0.6319464", "0.63100487", "0.62420344", "0.62378067", "0.62373847", "0.62171084", "0.62104243", "0.62080455", "0.6201593", "0.61032706", "0.6101561", "0.60353494", "0.6023481", "0.5988664", "0.5987141", "0.59437454", "0.5932476", "0.5913393", "0.59105027", "0.5896201", "0.58813703", "0.58646", "0.5776097", "0.5776097", "0.5752289", "0.5751951", "0.5741843", "0.57363117", "0.5732768", "0.573044", "0.57140946", "0.5706414", "0.5704287", "0.56872773", "0.5669531", "0.56521857", "0.565108", "0.5644969", "0.56365186", "0.5607682", "0.5585505", "0.55636764", "0.55631477", "0.55619997", "0.55484045", "0.55393744", "0.5516402", "0.5507602", "0.5501298", "0.54978406", "0.5496234", "0.54763275", "0.5462478", "0.54584336", "0.5448691", "0.5443344", "0.5436396", "0.5422069", "0.5411231", "0.5399277", "0.5391993", "0.53851515", "0.53811324", "0.5367055", "0.5366455", "0.53656876", "0.53573704", "0.5357244", "0.5339818", "0.5334816", "0.53332883", "0.53332883", "0.53324705", "0.53289586", "0.5321775", "0.5319581", "0.5312305", "0.52961046", "0.529397", "0.52840024", "0.52458066", "0.52374095", "0.52253675", "0.5215751", "0.5214948", "0.5206907", "0.5206718", "0.52013266", "0.51936734", "0.51904243", "0.5180279", "0.5177499", "0.5174357" ]
0.8356927
0
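A minimal standalone sketch of the technique in the record above, assuming plain unit edit costs and no keyboard weighting; levenshtein_matrix and the module-level distance_dataframe wrapper are illustrative names introduced here, not part of the dataset:

import numpy as np
import pandas as pd

def levenshtein_matrix(x, y):
    # (len(x)+1) x (len(y)+1) dynamic-programming table of edit distances
    dist = np.zeros((len(x) + 1, len(y) + 1))
    dist[:, 0] = np.arange(len(x) + 1)  # cost of deleting a prefix of x
    dist[0, :] = np.arange(len(y) + 1)  # cost of inserting a prefix of y
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            cost = 0 if x[i - 1] == y[j - 1] else 1
            dist[i, j] = min(dist[i - 1, j] + 1,         # deletion
                             dist[i, j - 1] + 1,         # insertion
                             dist[i - 1, j - 1] + cost)  # substitution
    return dist

def distance_dataframe(x, y):
    # Label rows with the letters of x and columns with the letters of y;
    # the leading empty-string label marks the initial row/column of the table.
    return pd.DataFrame(levenshtein_matrix(x, y),
                        index=["", *list(x)], columns=["", *list(y)])

# Usage: distance_dataframe("cat", "cart").iloc[-1, -1] == 1.0

The DP fill mirrors the unweighted branch of the distance_matrix negative above, and the DataFrame labeling matches the record's positive document.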
Calculate the similarity of two words. Return a number between 0 and 1 (1 means identical and 0 means fully different).
def similarity(self, x, y, keyboard_weight=None):
    dist = self.distance(x, y, keyboard_weight)
    max_len = max(len(x), len(y))
    max_dissimilarity = max_len * self.scale_coef
    similarity = 1 - dist / max_dissimilarity
    return similarity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wordSimilarityRatio(sent_1,sent_2):", "def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))", "def similar_text(word1, word2) -> float:\n\n return textdistance.overlap.similarity(word1, word2)", "def text_similarity(self, text_1: str, text_2: str):\n txt1 = self._pre_process(text_1)\n txt2 = self._pre_process(text_2)\n\n sim = self.model.wmdistance(txt1, txt2)\n\n if sim == inf:\n sim = INF_SIMILIARITY\n return sim", "def similarity(self, word1: str, word2: str, metric='cosine') -> float:\n if 0 == self.word2idx.get(word1, 0) or 0 == self.word2idx.get(word2, 0):\n return 0.\n\n return self.similarity_vec(self[word1], self[word2], metric=metric)\n # vec1 = self.__getitem__(word1).reshape((1, -1))\n # vec2 = self.__getitem__(word2).reshape((1, -1))\n # return 1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1)", "def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity", "def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()", "def calc_similarity_between_words(word1, word2):\n # pos = wn.Noun is mandatory otherwise the lowest common hypernym cant be found because of part of speach\n word1_synsets = wn.synsets(word1, pos=wn.NOUN)\n word2_synsets = wn.synsets(word2, pos=wn.NOUN)\n\n w1 = get_words_from_sysets(word1_synsets)\n w2 = get_words_from_sysets(word2_synsets)\n\n sim_matrix = np.zeros((len(w1), len(w2)))\n\n for i in range(len(w1)):\n for j in range(len(w2)):\n try:\n sim_matrix[i, j] = embeddings.distances(w1[i], [w2[j]])\n except KeyError:\n sim_matrix[i, j] = 1000\n continue\n\n w1_ind, w2_ind = np.unravel_index(np.nanargmin(sim_matrix, axis=None), sim_matrix.shape)\n lowest_common_hyp = (word1_synsets[w1_ind]).lowest_common_hypernyms(word2_synsets[w2_ind])\n return (sim_matrix[w1_ind, w2_ind], lowest_common_hyp)", "def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score", "def word_rotator_similarity(x, y):\n return 1 - word_rotator_distance(x, y)", "def mm_similarity(s1, s2):\n if filter(str.isalpha, s1) == filter(str.isalpha, s2):\n if len(s1) < len(s2):\n return float(len(s1)) / len(s2)\n else:\n return float(len(s2)) / len(s1)\n else:\n return 0.", "def get_similar_score(a, b):\n\n # Count the amount of words that A and B have in common\n commons = get_common_words_count(a, b)\n\n # Compute the amount of common words, divided by the log\n # the length of sentence 1 plus the length of sentence 2.\n # This means that higher similarity weights will be given\n # to longer sentences up to the asymptote of log10\n\n if len(a) > 0 and len(b) > 0:\n log_denom = log10(len(a) * len(b))\n else:\n return 
0\n\n # Avoid division by zero\n if log_denom == 0:\n return 0\n\n return commons / log_denom", "def word_order_similarity(self,sentence_1, sentence_2):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = list(set(words_1).union(set(words_2)))\n\t windex = {x[1]: x[0] for x in enumerate(joint_words)}\n\t r1 = self.word_order_vector(words_1, joint_words, windex)\n\t r2 = self.word_order_vector(words_2, joint_words, windex)\n\t return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))", "def similarity(text1, text2):\n\n clean1 = clean(text1)\n clean2 = clean(text2)\n count_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=CountVectorizer)\n tfidt_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=TfidfVectorizer)\n similarity_dict = {'count': count_meas, 'tfidf': tfidt_meas}\n return similarity_dict", "def get_similarity(s1, s2):\n t0 = sorted(list(set(s1.split(' ')).intersection(set(s2.split(' ')))))\n t1 = sorted(list(set(t0 + s1.split(' '))))\n t2 = sorted(list(set(t0 + s2.split(' '))))\n\n r01 = SequenceMatcher(None, t0, t1).ratio()\n r02 = SequenceMatcher(None, t0, t2).ratio()\n r12 = SequenceMatcher(None, t1, t2).ratio()\n return max(r01, r02, r12)", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def compute_similarity(self, text1, text2):\n\n text1_dist = self.predict(text1)[0]\n text2_dist = self.predict(text2)[0]\n return jensenshannon(text1_dist, text2_dist)", "def sentence_similarity(self,sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n\n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n vals = [synset.path_similarity(ss) if synset.path_similarity(ss) is not None else 0 for ss in synsets2]\n best_score = max(vals,default=0)\n\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n # Average the values\n if count == 0:\n return 0\n score /= count\n return score", "def similarity(self, wSet1, wSet2, idf): \n if len(wSet1) == 0 or len(wSet2) == 0:\n return 0.0\n else:\n defaultIDF = idf['unknownToken']\n intersection = wSet1.intersection(wSet2)\n# intersection = self.synonymIntersection(wSet1, wSet2, idf)\n if len(intersection) == 0:\n return 0\n sum1 = 0\n sum2 = 0\n intersectionSum = 0\n for word in wSet1:\n sum1 += (idf.get(word, defaultIDF))**2\n for word in wSet2:\n sum2 += (idf.get(word, defaultIDF))**2\n for word in intersection:\n intersectionSum += (idf.get(word, defaultIDF))**2\n \n if sum1 == 0 or 
sum2 == 0:\n return 0.0\n else:\n return intersectionSum/(math.sqrt(sum1) * math.sqrt(sum2))", "def string_similarity_score(left: str, right: str):\n return SequenceMatcher(None, left, right).ratio()", "def sentence_similarity(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n\n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n best_score = max([synset.path_similarity(ss) or 0 for ss in synsets2])\n\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n return 0\n\n # Average the values\n score /= count\n return score", "def string_similarity(item_1, item_2):\n return SequenceMatcher(None, item_1.lower(), item_2.lower()).ratio()", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim", "def similarity(pair: Tuple[Text, Text]) -> float:\n (a, b) = pair\n missing = (\n True\n if any(symbol not in Metrics.realine.feature_matrix for symbol in pair)\n else False\n )\n return 0.0 if missing else 1 - Metrics.realine.delta(a, b)", "def sentence_similarity_asym(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n pathsim = [synset.path_similarity(ss) for ss in synsets2]\n if len(pathsim) == 0:\n #print sentence1, sentence2\n pathsim = [0]\n best_score = max(pathsim)\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n return 0\n # Average the values\n score /= count\n return score", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, 
info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)", "def sentence_similarity(self,wnsimilarity,sentence1, sentence2,icneed=False):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n \n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets1:\n \n # Get the similarity value of the most similar word in the other sentence\n score_list=[]\n if icneed == True :\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss,self.brown_ic)\n score_list.append(temp)\n except:\n continue\n \n else:\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss)\n score_list.append(temp)\n except:\n continue\n \n \n score_list = np.array(score_list, dtype=np.float64)\n score_list = np.nan_to_num(score_list)\n# print(score_list)\n if len(score_list)>0:\n best_score = np.nanmax(score_list)\n else:\n best_score=0.0\n# print(best_score)\n# print(type(best_score))\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score =score + best_score\n# print(score)\n count = count+ 1\n \n \n# print(\"one sentence over\")\n # Average the values\n score /= count\n return score", "def sentence_similarity(sentence1, sentence2):\n sentence1 = sentence1.tags\n sentence2 = sentence2.tags\n \n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n \n for synset in synsets1:\n \n li=[synset.path_similarity(ss) for ss in synsets2]\n m=0\n for i in range(len(li)):\n if li[i] is not None and m<li[i]:\n m=li[i]\n if m != 0:\n score += m\n count += 1\n\n if count is 0:\n score = 0\n else:\n score /= count\n return score", "def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim", "def text_proximity(str_1: str, str_2: str) -> float:\n tokens_1 = Counter(str_1.split(' '))\n tokens_2 = Counter(str_2.split(' '))\n return _normalized_scalar_product(tokens_1, tokens_2)", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def similarity(self, wf, positions = None, features = None):\n # The similarity is the inverse square of the distance between the two\n # 
WordForms. Impose a minimum on distances (to deal with zero).\n dist = self.distance(wf, positions = positions, features = features)\n if dist < .1:\n dist = .1\n sim = 1 / (dist ** 2)\n return sim", "def lev_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity measure\n return measure.get_sim_score(s1, s2)", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def lemmas_similarity(s1, s2, filter_stop_words=True):\n # Tokenize by sentences into words in lower case \n tokenized_sentence_1 = nltk.word_tokenize(s1.lower())\n tokenized_sentence_2 = nltk.word_tokenize(s2.lower())\n \n if not filter_stop_words:\n tokenized_sentence_1 = [token for token in tokenized_sentence_1 if token not in stop_words]\n tokenized_sentence_2 = [token for token in tokenized_sentence_2 if token not in stop_words]\n \n tagged_sentence_1 = pos_tag(tokenized_sentence_1) # [ (word, POS_TAG), ...]\n tagged_sentence_2 = pos_tag(tokenized_sentence_2) # [ (word, POS_TAG), ...]\n \n lemmas_sentence_1 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_1] \n lemmas_sentence_2 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_2] # [LEMMA_1, ...]\n \n # Compute similarity\n if len(lemmas_sentence_1) > 0 and len(lemmas_sentence_2) > 0:\n similarity = 1 - jaccard_distance(set(lemmas_sentence_1), set(lemmas_sentence_2))\n # Compute label of similarity \n return similarity\n else:\n return 0", "def heuristic_2(a: str, b: str) -> float:\n # generate term-document matrices\n if get_intro(a) == \"\" or get_intro(b) == \"\":\n return 2\n else:\n corpus = [get_intro(a), get_intro(b)]\n vect = TfidfVectorizer()\n mat = vect.fit_transform(corpus)\n # return cosine similarity\n return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2", "def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, max_len, sim)\n return sim", "def term_similarity(node_a, node_b, embeddings):\n try:\n similarity = embeddings.similarity(node_a.__str__(), node_b.__str__())\n except Exception as e:\n # If term(s) does not occur in embedding similarity is always 0.\n similarity = 0\n logging.info(e)\n return similarity", "def similarity_score(a,b):\n\tjsc_scaler = 15\n\tocs_scaler = 5\n\ttcss_scaler = 0.05\n\n\tjaccard_similarity_coefficient_score = jsc_scaler * jaccard_similarity_coefficient(a,b)\n\toverlap_coefficient_score = ocs_scaler * overlap_coefficient(a,b)\n\ttotal_char_similarity_score = tcss_scaler * total_char_similarity(a,b)\n\ttotal_score = jaccard_similarity_coefficient_score + overlap_coefficient_score + total_char_similarity_score\n\t\n\treturn total_score", "def similarity_score(a,b):\n\tjsc_scaler = 15\n\tocs_scaler = 5\n\ttcss_scaler = 0.05\n\n\tjaccard_similarity_coefficient_score = jsc_scaler * jaccard_similarity_coefficient(a,b)\n\toverlap_coefficient_score = ocs_scaler * overlap_coefficient(a,b)\n\ttotal_char_similarity_score = 
tcss_scaler * total_char_similarity(a,b)\n\ttotal_score = jaccard_similarity_coefficient_score + overlap_coefficient_score + total_char_similarity_score\n\t\n\treturn total_score", "def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)", "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator", "def distance_unigrams_same(t1, t2):\n t1_terms = make_terms_from_string(t1)\n t2_terms = make_terms_from_string(t2)\n terms1 = set(t1_terms)\n terms2 = set(t2_terms)\n shared_terms = terms1.intersection(terms2)\n #print(shared_terms)\n all_terms = terms1.union(terms2)\n #print(all_terms)\n dist = 1.0 - (len(shared_terms) / float(len(all_terms)))\n return dist", "def synSimilarity(self, wSet1, wSet2): \n nW1 = len(wSet1)\n nW2 = len(wSet2)\n if nW1 == 0 or nW2 == 0:\n return 0.0\n synonyms1 = self.getSynonyms(wSet1)\n synonyms2 = self.getSynonyms(wSet2)\n \n # easy bit: find the number of identical words in each mention\n intersection = wSet1.intersection(wSet2)\n # now remove these words and look for synonyms between those left\n w1 = wSet1 - intersection\n w2 = wSet2 - intersection\n while len(w1) > 0:\n word1 = w1.pop()\n if word1 not in synonyms1:\n continue # no synonyms for this word\n \n for word2 in w2:\n if word2 not in synonyms2:\n continue # no synonyms for this word\n sharedSynsets = synonyms1[word1].intersection(synonyms2[word2])\n if len(sharedSynsets) > 0:\n # the two have at least one synset in common, consider them synonyms\n w2.remove(word2)\n intersection.add(word1)\n \n break\n return float(2*len(intersection)) / (nW1 + nW2)", "def similarity(self, char1, char2, weights=(1.0, 0.0, 0.0), as_tree=False):\n\n assert char1 in self.char_dict\n assert char2 in self.char_dict\n shape_w, sound_w, freq_w = weights\n\n if char1 in self.char_dict and char2 in self.char_dict:\n\n shape_sim = self.shape_similarity(char1, char2, as_tree=as_tree)\n sound_sim = self.pronunciation_similarity(char1, char2)\n freq_sim = 1.0 - self.char_dict[char2] / len(self.char_dict)\n\n return shape_sim * shape_w + sound_sim * sound_w + freq_sim * freq_w\n else:\n return 0.0", "def jaccard_similarity(text1, text2):\r\n\r\n set1 = set(text1.split());\r\n set2 = set(text2.split());\r\n\r\n num = set.intersection(set1, set2);\r\n denom = set.union(set1, set2);\r\n\r\n return len(num)/len(denom);", "def jaccard_similarity(string1, string2):\n\n a = set(string1.split())\n b = set(string2.split())\n\n similarity = float(\n len(a.intersection(b)) * 1.0\n / len(a.union(b)))\n\n return similarity", "def wup_measure(self,a, b, similarity_threshold = 0.925, debug = False):\n if debug: print('Original', a, b)\n #if word_pair_dict.has_key(a+','+b):\n if a+','+b in self.word_pair_dict.keys():\n return self.word_pair_dict[a+','+b]\n\n def get_semantic_field(a):\n return wn.synsets(a, pos=wn.NOUN)\n\n if a == b: return 1.0\n\n interp_a = get_semantic_field(a)\n interp_b = get_semantic_field(b)\n if debug: print(interp_a)\n\n if interp_a == [] or interp_b == []:\n return 
0.0\n\n if debug: print('Stem', a, b)\n global_max=0.0\n for x in interp_a:\n for y in interp_b:\n local_score=x.wup_similarity(y)\n if debug: print('Local', local_score)\n if local_score > global_max:\n global_max=local_score\n if debug: print('Global', global_max)\n\n # we need to use the semantic fields and therefore we downweight\n # unless the score is high which indicates both are synonyms\n if global_max < similarity_threshold:\n interp_weight = 0.1\n else:\n interp_weight = 1.0\n\n final_score = global_max * interp_weight\n self.word_pair_dict[a+','+b] = final_score\n return final_score", "def compare_words(word1, word2):\n word1 = word1.lower()\n word2 = word2.lower()\n seg_scores = []\n if len(word1) >= len(word2):\n for i in range(0, len(word1) - len(word2) + 1):\n seg_scores.append(find_difference(word1[i:i+len(word2)], word2))\n else:\n for i in range(0, len(word2) - len(word1) + 1):\n seg_scores.append(find_difference(word2[i:i+len(word1)], word1))\n return round(min(seg_scores) + abs(len(word1) - len(word2))/float(len(max([word1, word2]))),2)", "def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)", "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def simple_baseline_similarity(s1, s2):\n # Tokenize by sentences into words in lower case \n tokenized_sentence_1 = nltk.word_tokenize(s1.lower())\n tokenized_sentence_2 = nltk.word_tokenize(s2.lower())\n\n tagged_sentence_1 = pos_tag(tokenized_sentence_1) # [ (word, POS_TAG), ...]\n tagged_sentence_2 = pos_tag(tokenized_sentence_2) # [ (word, POS_TAG), ...]\n \n lemmas_sentence_1 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_1 if not tagged_word in stop_words] \n lemmas_sentence_2 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_2 if not tagged_word in stop_words] # [LEMMA_1, ...]\n \n word_seq_match = difflib.SequenceMatcher(None, tokenized_sentence_1, tokenized_sentence_2)\n word_match = word_seq_match.find_longest_match(0, len(tokenized_sentence_1), 0, len(tokenized_sentence_2))\n\n lemm_seq_match = difflib.SequenceMatcher(None, lemmas_sentence_1, lemmas_sentence_2)\n lemm_match = lemm_seq_match.find_longest_match(0, len(lemmas_sentence_1), 0, len(lemmas_sentence_2))\n\n word_sim = word_match.size/(max(len(tokenized_sentence_1), len(tokenized_sentence_2)) + 0.001)\n lemm_sim = lemm_match.size/(max(len(lemmas_sentence_1), len(lemmas_sentence_2)) + 0.001)\n\n return word_sim, lemm_sim", "def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = 
next(parsed_sentence_1)\n tree2 = next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return similarity\n else:\n return 0", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def needleman_wunsch(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.NeedlemanWunsch()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity measure\n return measure.get_raw_score(s1, s2)", "def scientific_match_ratio(str1, str2, keywords):\n\n # Get rid of the numbers\n str1_numberless = remove_numbers(str1)\n str2_numberless = remove_numbers(str2)\n\n # Get the keywords and whatever remains after removing the keywords\n str1_keywords, str1_remainder = get_common_words_in_description(str1_numberless, keywords)\n str2_keywords, str2_remainder = get_common_words_in_description(str2_numberless, keywords)\n\n remainder_dist = string_num_matches(str1_remainder, str2_remainder)\n common_keywords = str1_keywords.intersection(str2_keywords)\n\n common_keyword_total_len = 0\n for common_kword in common_keywords:\n common_keyword_total_len += len(common_kword)\n\n return (remainder_dist + common_keyword_total_len) * 1.0 / max(len(str1_numberless), len(str2_numberless))", "def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)", "def optimal_string_alignment_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(optimal_string_alignment_distance(s1, s2)) / max_cost", "def count_words(title_pair: np.array) -> float:\r\n title_1, title_2 = title_pair\r\n # Transform into sets of words\r\n title_1 = set(title_1.split())\r\n title_2 = set(title_2.split())\r\n # Divide length of intersection by length of union\r\n ratio = len(title_1.intersection(title_2)) / len(title_1.union(title_2))\r\n return ratio", "def cosine_similarity(a, b):\n cs = dot_product(a, b)/(norm(a) * norm(b))\n return cs", "def cosine_similarity_tensorflow(tf_word_representation_A, tf_words_representation_B):\n a_normalized = tf.nn.l2_normalize(tf_word_representation_A, axis=-1)\n b_normalized = tf.nn.l2_normalize(tf_words_representation_B, axis=-1)\n similarity = tf.reduce_sum(\n tf.multiply(a_normalized, b_normalized), \n axis=-1\n )\n \n return similarity", "def distance_bigrams_same(t1, t2):\n t1_terms = make_terms_from_string(t1)\n t2_terms = make_terms_from_string(t2)\n terms1 = set(ngrams(t1_terms, 2)) # was using nltk.bigrams\n terms2 = set(ngrams(t2_terms, 2))\n shared_terms = terms1.intersection(terms2)\n #print(shared_terms)\n all_terms = terms1.union(terms2)\n #print(all_terms)\n dist = 1.0\n if len(all_terms) > 0:\n dist = 1.0 - (len(shared_terms) / float(len(all_terms)))\n return dist", "def name_similarity(name_1, name_2, nickname_2=None):\n name_similarity = string_similarity(name_1, name_2)\n nickname_similarity = 0\n if nickname_2:\n nickname_similarity = string_similarity(name_1, nickname_2)\n return 
max(name_similarity, nickname_similarity)", "def get_fuzz_ratio(first_word, second_word):\n return fuzz.ratio(first_word, second_word), first_word, second_word", "def text_similarity(this_text, other_text, shingle_length=5, minhash_size=200, random_seed=5):\n this_shingles = ShingledText(this_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n other_shingles = ShingledText(other_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n return this_shingles.similarity(other_shingles)", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def compare_vectors(word_vector1, word_vector2):\n all_words = list(set(word_vector1).union(set(word_vector2)))\n frequency_dict1 = word_frequencies(word_vector1)\n frequency_dict2 = word_frequencies(word_vector2)\n\n frequency_vector1 = [frequency_dict1.get(word, 0) for word in all_words]\n frequency_vector2 = [frequency_dict2.get(word, 0) for word in all_words]\n\n return similarity(frequency_vector1, frequency_vector2)", "def lcs_similarity(s1, s2):\n max_len = 0\n i = 0\n\n while s1[i] == s2[i]:\n max_len += 1\n i += 1\n if len(s1) == i or len(s2) == i:\n break\n\n if len(s1) < len(s2):\n return float(max_len) / len(s2)\n else:\n return float(max_len) / len(s1)", "def calculate_similarity(self, tweets):\r\n if (len(tweets) == 1):\r\n return 0\r\n vectors = self.vectorizer.vectorize_data(tweets, False)\r\n\r\n temp = cosine_similarity(vectors[0:-1], vectors)\r\n temp = [item for sublist in temp for item in sublist]\r\n sim = sum(temp) / len(temp)\r\n return sim", "def symmetric_sentence_similarity(self, sentence1, sentence2):\n return (self.sentence_similarity(sentence1, sentence2) + self.sentence_similarity(sentence2, sentence1)) / 2", "def semanticSimilarity(self, text1, text2, distanceMeasure = \"cosine\"):\n return self._er.jsonRequestAnalytics(\"/api/v1/semanticSimilarity\", { \"text1\": text1, \"text2\": text2, \"distanceMeasure\": distanceMeasure })", "def wn_concreteness(word, similarity_fn=wn.path_similarity):\n syns = wn.synsets(word)\n dists = [1 - similarity_fn(s, s.root_hypernyms()[0]) for s in syns]\n return np.median(dists), max(dists)", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))", "def token_stopword_match(a, b, threshold=0.5):\r\n \r\n pos_a = map(get_wordnet_pos, nltk.pos_tag(tokenizer(a)))\r\n pos_b = map(get_wordnet_pos, nltk.pos_tag(tokenizer(b)))\r\n a = [lemmatizer.lemmatize(token.lower(), pos) for token, pos in pos_a\r\n if pos == wordnet.NOUN and token.lower() not in stopwords]\r\n b = [lemmatizer.lemmatize(token.lower(), pos) for token, pos in pos_b\r\n if pos == wordnet.NOUN and token.lower() not in stopwords]\r\n \r\n \r\n # Calculate Jaccard similarity\r\n ratio = len(set(a).intersection(b)) / float(len(set(a).union(b)))\r\n\r\n # return (ratio >= threshold)\r\n return (ratio)", "def doc_doc_similarity(matrix_a, matrix_b):\n assert matrix_a.shape[1] == matrix_b.shape[0], \"Mismatched shape between matrix A and matrix B\"\n numerator = 
np.dot(matrix_a, matrix_b)\n assert numerator.shape == (matrix_a.shape[0], matrix_b.shape[1]), numerator.shape\n denominator = np.sqrt(np.sum(matrix_a ** 2, axis=1))[:, np.newaxis] * np.sqrt(\n np.sum(matrix_b.T ** 2, axis=1))[:, np.newaxis].T\n assert (denominator > 0).all(), \"Denominator is zero {}\".format(denominator)\n similarity_matrix = np.multiply(numerator, 1 / denominator)\n return similarity_matrix", "def most_similar_word(self,word, word_set):\n\t max_sim = -1.0\n\t sim_word = \"\"\n\t for ref_word in word_set:\n\t sim = self.word_similarity(word, ref_word)\n\t if sim > max_sim:\n\t max_sim = sim\n\t sim_word = ref_word\n\t return sim_word, max_sim", "def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))", "def similarity(self, new_sentence):\n cleaned = self.clean_string(new_sentence)\n stemmed = self.stem(cleaned, train=False)\n\n if not set(stemmed).intersection(set(self.vocabulary.keys())):\n return None\n\n else:\n difference = set(stemmed) - set(self.vocabulary.keys())\n to_append = np.zeros((self.matrix.shape[0], len(difference)))\n matrix = np.append(self.matrix, to_append, axis=1)\n\n new_voc = copy.deepcopy(self.vocabulary)\n for word in difference:\n if word not in new_voc:\n new_voc[word] = len(new_voc)\n\n question_vector = self.stem2vec(stemmed, new_voc)\n result = np.matmul(matrix, question_vector)\n return np.argmax(result)", "def check_analogy(word1, word2, word3, word4, model):\n LoM = model.most_similar(positive=[word2, word3], negative=[word1], topn=100)\n LoWords = []\n for x in LoM:\n LoWords += [x[0]]\n if word4 not in LoWords:\n return 0\n else:\n score = 100\n for x in LoWords:\n if x != word4:\n score += -1\n else:\n return score", "def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score", "def find_similarity(message1, message2):\n total = 0\n for i in range(len(message1)):\n max = 0\n for j in range(len(message2)):\n message1_encoded = embed([message1[i]])\n message2_encoded = embed([message2[j]])\n sim = average_similarity(message1_encoded, message2_encoded)\n if sim > max:\n max = sim\n total += max\n return total/len(message1)", "def check_similarity(pair):\n user, business = pair['user_id'], pair['business_id']\n similarity = -1\n try:\n user_text = eval(user_profile[user])\n business_text = eval(business_profile[business])\n similarity = cosine_similarity(user_text, business_text)\n except:\n pass\n return similarity if similarity != -1 else 0", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def 
symmetric_sentence_similarity(self,wnsimilarity,sentence1, sentence2,icneed=False):\n if icneed==True:\n return (self.sentence_similarity(wnsimilarity,sentence1, sentence2,icneed=True) + self.sentence_similarity(wnsimilarity,sentence2, sentence1,icneed=True)) / 2 \n else:\n return (self.sentence_similarity(wnsimilarity,sentence1, sentence2) + self.sentence_similarity(wnsimilarity,sentence2, sentence1)) / 2", "def information_content_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets_sentence_1:\n L = []\n for ss in synsets_sentence_2:\n try:\n L.append(synset.lin_similarity(ss, brown_ic))\n except:\n continue\n if L: \n best_score = max(L)\n score += best_score\n count += 1\n # Average the values\n if count > 0: score /= count\n return score", "def is_words_similar(string, model):\n\n if fuzz.ratio(string, model, score_cutoff=75):\n return True\n\n return False", "def get_similarity(user1: Rating, user2: Rating) -> float:\n shared = 0.0\n for m_id in user1:\n if m_id in user2:\n shared += user1[m_id] * user2[m_id]\n norm1 = 0.0\n for m_id in user1:\n norm1 = norm1 + user1[m_id] ** 2\n norm2 = 0.0\n for m_id in user2:\n norm2 = norm2 + user2[m_id] ** 2\n return (shared * shared) / (norm1 * norm2)", "def compare_words(self, word1, word2):\n return Counter(word1) == Counter(word2)", "def compare_word_selection(selection1,selection2):\n\n num_extra=0;\n num_total=0;\n total_score=0;\n for k in selection1.keys():\n num_total+=1;\n if k not in selection2:\n num_extra+=1;\n else:\n score=compare_sentences(selection1[k],selection2[k]);\n logging.info(\"Score: %f\" % score );\n total_score+=score;\n\n for k in selection2.keys():\n if k not in selection1:\n num_total+=1;\n num_extra+=1;\n\n if num_total==0:\n return 0;\n\n return float(total_score)/float(num_total)", "def similarity_with(self, other_text_analyzer):\n pass" ]
[ "0.84911114", "0.8240029", "0.8203488", "0.80538607", "0.7916579", "0.78859663", "0.78573513", "0.78237593", "0.78181165", "0.7802038", "0.7788888", "0.77773386", "0.7673199", "0.76593477", "0.760347", "0.7594198", "0.7564784", "0.7561484", "0.7549728", "0.7532246", "0.7500044", "0.7471467", "0.73921895", "0.73921895", "0.7387371", "0.7318624", "0.7310381", "0.7300879", "0.72922146", "0.7292042", "0.72904617", "0.72790873", "0.7247418", "0.72413874", "0.7233605", "0.72234255", "0.7211247", "0.7208172", "0.71883893", "0.7187115", "0.7158982", "0.7157663", "0.7157663", "0.7129959", "0.70963836", "0.7089297", "0.7081865", "0.7078233", "0.7070133", "0.70638955", "0.7054292", "0.70448565", "0.70434904", "0.70396674", "0.70171475", "0.7004158", "0.6989737", "0.6989737", "0.69759405", "0.696557", "0.69626343", "0.69624424", "0.6934117", "0.6913725", "0.691323", "0.69116414", "0.6895093", "0.6889268", "0.688324", "0.6877121", "0.68758863", "0.68731713", "0.686849", "0.684837", "0.6838484", "0.6835362", "0.6818634", "0.6817954", "0.68088776", "0.68033415", "0.68024945", "0.6802357", "0.67982966", "0.6792254", "0.6772761", "0.6750343", "0.6749168", "0.67456836", "0.67416096", "0.67384315", "0.6737959", "0.67317414", "0.6720062", "0.6711617", "0.67032444", "0.66980815", "0.66940546", "0.66829246", "0.66656196", "0.6665229" ]
0.71260786
44
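Nearly all of the negatives in the row above reduce to the same cosine form (a dot product divided by the product of vector norms). A minimal illustrative sketch of that shared computation, including the zero-vector guard several of the snippets add by hand — this is editorial, not a row of the dataset:

import numpy as np

def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
    # Guard the zero-vector case that several snippets above check explicitly.
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0.0:
        return 0.0
    return float(np.dot(v1, v2) / denom)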
Updates this Role instance
def update(self, permission, **kwargs):
    kwargs['permission'] = permission
    return self.update_instance(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, role):\n model = models.load('Role', role)\n model.account_id = self.account_id\n\n return self.client.update_role(model)", "def update(self, role):\n self._router_request(\n self._make_request_data(\n 'updateAdminRole',\n data=dict(\n params=dict(\n uid=self.uid,\n name=self.name,\n role=role\n )\n )\n )\n )\n\n self.role = role\n\n return True", "def update_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover", "def save(self):\n body = {}\n body[\"permissions\"] = dict(self.permissions)\n body[\"name\"] = self.name\n body[\"description\"] = self.description\n _, role = self._requestor.patch('/roles/' + self._id, json=body)\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])", "async def update(self, ctx):\n\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n # fetch the discord message\n guild_id = await self.get_deep_data(data, \"bot__server__uid\")\n\n guild = self.get_guild(int(guild_id))\n channel = self.get_channel(guild, int(data.message.cuid))\n message = await channel.fetch_message(int(data.message.uid))\n\n # update the message\n await message.edit(content=\"_ _\", embed=self.create_message_embed(data))\n\n await self.update_reactions(message, data)\n\n await ctx.send(\"Updated.\")", "async def save(self):\n await config.member(self.member).set_raw(str(self.role.id), value=self.as_dict)", "def updateRole(self, role_id, title, description):\n self._roles[role_id].update({'title': title,\n 'description': description})", "def manage_updateRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n self.updateRole(role_id, title, description)\n\n message = 'Role+updated'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?role_id=%s&'\n 'manage_tabs_message=%s' %\n (self.absolute_url(), role_id, message))", "def update(self, role, timeout=None):\n req = RoleUpdateRequest()\n\n if role is not None:\n req.role.CopyFrom(plumbing.convert_role_to_plumbing(role))\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Update(\n req,\n metadata=self.parent.get_metadata('Roles.Update', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleUpdateResponse()\n resp.meta = plumbing.convert_update_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.role = plumbing.convert_role_to_porcelain(plumbing_response.role)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def put(self, id):\n data = request.json\n role = Role.query.filter(Role.id == id).one()\n if 'description' in data:\n role.description = data.get('description')\n if 'name' in data:\n role.name = data.get('name')\n db.session.add(role)\n db.session.commit()\n return None, 204", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def update(self, **kwargs):\n self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n self.manager.update(self, **kwargs)", "def role(self, role):\n\n self._role = int(role)", "def updateRole(role_name):\n\n if 
role_name == 'gsoc_mentor':\n updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')\n elif role_name == 'gsoc_org_admin':\n updater = RoleUpdater(\n GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')\n elif role_name == 'gsoc_student':\n updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')\n\n updater.run()\n return http.HttpResponse(\"Ok\")", "def _update(self, uuid, name, permissions):\n data = {\"name\": name, \"permissions\": permissions, \"uuid\": uuid}\n path = self.router.roles_by_uuid.format(uuid=uuid)\n return self.request(\n method=\"post\", path=path, json=data, error_json_invalid=False\n )", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.parent.update_instance(self.name, kwargs)", "def update_role(self, role_id, name: str) -> Role | None:\n role = self.get_session.get(self.role_model, role_id)\n if not role:\n return None\n try:\n role.name = name\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_UPD_ROLE.format(role))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_UPD_ROLE.format(e))\n self.get_session.rollback()\n return None\n return role", "def role(self, role):\n\n self._role = role", "def role(self, role):\n\n self._role = role", "def update(self, obj):\n if isinstance(obj, EventUser): # Need to convert list to string before storing in db\n obj.roles = str(obj.roles)\n self.s.commit()\n return obj", "def update_role(self, rolename, description):\n params = {\n \"f\" : \"json\",\n \"rolename\" : rolename\n }\n if description is not None:\n params['description'] = description\n uURL = self._url + \"/roles/update\"\n return self._con.post(path=uURL, postdata=params)", "def update(cls) -> None:\n raise NotImplementedError", "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-service_account', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n payload = request.get('payload')\n updated_dict = transforms.loads(payload)\n # updated_dict = transforms.json_to_dict(\n # transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self):\n self._client.patch(self)", "def update(self):\n db.session.commit()", "def update(self):\n db.session.commit()", "def _update(self):\n path = \"/members/%s\" % self._dict['member_id']\n data = self.extract()\n if self._dict['member_status_id'] in (\n MemberStatus.Active, MemberStatus.Error, MemberStatus.OptOut):\n data['status_to'] = self._dict['member_status_id']\n if not self.account.adapter.put(path, data):\n raise ex.MemberUpdateError()", "def changeRoleInfo(self, role, info):", "async def put(\n self, user_id: str, /, data: UpdateAdministratorRoleRequest\n ) -> Union[r200[UserResponse], r404]:\n\n if user_id == self.request[\"client\"].user_id:\n raise HTTPBadRequest(text=\"Cannot change own role\")\n\n try:\n administrator = await get_data_from_req(\n 
self.request\n ).administrators.set_administrator_role(user_id, data.role)\n except ResourceNotFoundError:\n raise NotFound()\n\n return json_response(administrator, status=200)", "def update(self, *args):\n qry = UpdateEntityQuery(self)\n self.context.add_query(qry)\n return self", "def changeRole(self, node, role):", "async def update_cog(self):\n\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n # fetch the discord message\n guild_id = await self.get_deep_data(data, \"bot__server__uid\")\n\n guild = self.get_guild(int(guild_id))\n channel = self.get_channel(guild, int(data.message.cuid))\n message = await channel.fetch_message(int(data.message.uid))\n self.message_id = int(data.message.uid)\n\n # update the message\n await message.edit(content=\"_ _\", embed=self.create_message_embed(data))\n\n await self.update_reactions(message, data)", "def update(self):\n with managed_session() as session:\n session.merge(self)", "def update(self, **kwargs):\n print(\"Updating model\")\n print(kwargs)\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def test_ipam_roles_update(self):\n pass", "def update(self):\n\n pass", "def update(request, role_id):\n\n role = get_object_or_404(ProjectRole, pk=role_id)\n\n # require permission to proceed\n must_have_permission(request.user, role.project, \"can_edit_roles\")\n\n permittee = Permittee.objects.get_as_permittee(request.user)\n\n initial_set = list(role.obj_permissions.values_list(\"pk\", flat=True))\n\n # Get the permissions that the user can delegate to others as well\n # as the ones that are already in the role. Obtain DISTINCT values.\n obj_permissions = ObjectPermission.objects.filter_from_instance(\n role.project).filter(\n Q(permissionownership__permittee=permittee,\n permissionownership__can_delegate=True) |\n Q(id__in=initial_set)\n ).distinct()\n\n project_url = reverse(\"project_detail\", args=[role.project.id])\n\n # Use to update the permissions in the ProjectRole object so\n # users with that role are affected from the time this is updated\n def post_save(instance, created):\n from expedient.clearinghouse.roles.models import ObjectPermission\n new_obj_permissions_pks = [ p.pk for p in instance.obj_permissions.all() ]\n for permission in obj_permissions:\n # Add and delete permissions accordingly...\n try:\n instance.remove_permission(permission)\n except:\n pass\n if permission.pk in new_obj_permissions_pks:\n instance.add_permission(permission)\n\n return generic_crud(\n request,\n obj_id=role_id,\n model=ProjectRole,\n template=TEMPLATE_PATH+\"/update.html\",\n redirect=lambda instance: project_url,\n template_object_name=\"role\",\n form_class=ProjectRoleForm,\n extra_form_params={\n \"obj_permissions\": obj_permissions,\n },\n extra_context={\n \"project\": role.project,\n \"breadcrumbs\": (\n (\"Home\", reverse(\"home\")),\n (\"Project %s\" % role.project.name, project_url),\n (\"Update Role %s\" % role.name, request.path),\n )\n },\n post_save = post_save,\n )", "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-category', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = 
[]\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def updateRoles(request):\n\n # update org admins\n #updateRole('gsoc_org_admin')\n\n # update mentors\n #updateRole('gsoc_mentor')\n\n # update students\n # we can assume that students cannot have any other roles, so we do not\n # need to set ETA\n updateRole('gsoc_student')", "def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)", "def update(self, username, password, rol, **kwargs):\n\n self.usuario.groups.set([rol])\n if username != self.usuario.username:\n self.usuario.username = username\n \n if password:\n self.usuario.set_password(password)\n \n self.usuario.save()\n\n for field, value in kwargs.items():\n setattr(self, field, value)\n\n self.save()", "def update(self, **kwargs):\n return self._object.update(meta=kwargs)", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-course-category', {'key': key}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': self.KEY})\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def update(self, *args, **kwargs):\n raise NotImplementedError", "def update(self, **options):\n pass", "async def update(self):\n self.data = await self.api.user.get()", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def update(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.update(self.__class__.__name__, data['id'], data)\n\n self.__dict__.update(saved_data)", "def _update(self):\n with sqlite3.connect(self.dbpath) as connection:\n cursor = connection.cursor()\n UPDATESQL = \"\"\"UPDATE accounts\n SET first_name=:first_name, last_name=:last_name, \n username=:username, email_address=:email_address, \n password_hash=:password_hash, balance=:balance, \n account_number=:account_number, admin=:admin\n WHERE id=:id;\"\"\"\n values = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"username\": self.username,\n \"email_address\": self.email_address,\n \"password_hash\": self.password_hash, \n \"balance\": self.balance, \n \"account_number\": self.account_number,\n \"admin\": self.admin,\n \"id\": self.id\n }\n try:\n cursor.execute(UPDATESQL, values)\n except sqlite3.IntegrityError:\n raise ValueError(\"ID (id) does not set in datebase.\")", "def update(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"update\")\n self[-1].update(*args, **kwargs)", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def set_role(self, user, role):\n obj = self._get_through_object(user)\n obj.role = role if isinstance(role, int) else obj.ROLE_MAP_REV[role]\n obj.save()", "async def setoperator(self, ctx, role_id: int, perms: int):\n s = db.session()\n role = s.query(db.AdminRole).filter(db.AdminRole.role_id == role_id).first()\n if role:\n if perms == 0:\n s.delete(role)\n else:\n role.perms = perms\n else:\n 
s.add(db.AdminRole(role_id=role_id, perms=perms))\n s.commit()\n s.close()\n await ctx.send(\"Role set\")", "def update(self, instance, validated_data):\n pass", "def update(self, sid, permission, **kwargs):\n kwargs['permission'] = permission\n return self.update_instance(sid, kwargs)", "def update(self, **kwargs):\n return self._update_data(self.put(None, data=kwargs))", "def edit_role(role_id, new_name=None, new_arn=None):\n\tsession = get_session()\n\told_data = get_role(role_id)\n\tdata = {}\n\tdata[\"name\"] = new_name or old_data[\"name\"]\n\tdata[\"arn\"] = new_arn or old_data[\"arn\"]\n\tresponse = session.put(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id), json=data)\n\treturn response.json()", "def patch(self, username, role):\n try:\n UserService.add_role_to_user(token_auth.current_user(), username, role)\n return {\"Success\": \"Role Added\"}, 200\n except UserServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def update(self, arm, context, reward):\n raise NotImplementedError", "def update(self, arm, context, reward):\n raise NotImplementedError", "def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def updateRoles(self, obj, dom):\n domRoles = self.validateRoles(self.getRolesFromDOM(dom))\n moduleRoles = self.validateRoles(self.getRolesFromModule(obj))\n\n updateRoles = {}\n deleteUsers = []\n cancelRoles = []\n \n if self.action == 'create' or self.update_semantics == 'replace':\n # set default roles only if the dom contains no roles\n if len(domRoles.keys()) == 0:\n updateRoles = self.getDefaultRoles(\n self.pmt.getAuthenticatedMember().getId())\n else:\n updateRoles.update(domRoles)\n\n elif self.update_semantics == 'merge':\n updateRoles.update(moduleRoles)\n for role, userids in domRoles.items():\n userids = set(userids)\n userids.union(updateRoles.get(role, []))\n updateRoles[role] = list(userids)\n\n elif self.update_semantics == 'replace':\n currentUsers = set()\n for userids in moduleRoles.values():\n currentUsers.update(userids)\n domUsers = set()\n for userids in domRoles.values():\n domUsers.update(userids)\n for userids in updateRoles.values():\n domUsers.update(userids)\n deleteUsers = currentUsers.difference(domUsers)\n\n # XXX: Workaround for bug in generateCollaborationRequests that\n # requires a user listed in deleteRoles to be present in\n # newRoles\n for role, userids in moduleRoles.items():\n for user in deleteUsers:\n if user in userids:\n updateRoles.setdefault(role, [])\n updateRoles[role].append(user)\n\n self._updateRoles(obj, updateRoles, deleteUsers, cancelRoles)", "def update_workspaces_members_role(\n self,\n context,\n request: TracimRequest,\n hapic_data=None\n ) -> UserRoleWorkspaceInContext:\n app_config = request.registry.settings['CFG']\n rapi = RoleApi(\n current_user=request.current_user,\n session=request.dbsession,\n config=app_config,\n )\n\n role = rapi.get_one(\n user_id=hapic_data.path.user_id,\n workspace_id=hapic_data.path.workspace_id,\n )\n workspace_role = WorkspaceRoles.get_role_from_slug(hapic_data.body.role)\n role = rapi.update_role(\n role,\n role_level=workspace_role.level\n )\n return rapi.get_user_role_workspace_with_context(role)", "def __setRole(self, session):\r\n self.__role = session.role\r\n if self._config.has_key('purpose'):\r\n co_role = ccm.get_role_for_purpose(session, self._config['purpose'])\r\n _logger.info(\"Switching user to role: %s\" % co_role)\r\n session.role = 
co_role\r\n _logger.info(\"Switched user to role: %s\" % session.role)", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-course-featured', {'key': key}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': self.KEY})\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def update(self, arm, reward):\n raise NotImplementedError", "def update(self, *args, **kw):\n pass", "def update_user_role(ranger_url, user_name, user_role, admin_username_password):\n url = format(\"{ranger_url}/service/xusers/secure/users/roles/userName/{user_name}\")\n\n role = {\n \"vXStrings\": [{\"value\": user_role}]\n }\n\n base_64_string = base64.encodestring(admin_username_password).replace('\\n', '')\n\n request = urllib2.Request(url, json.dumps(role))\n request.get_method = lambda: 'PUT'\n request.add_header('Content-Type', 'application/json')\n request.add_header('Accept', 'application/json')\n request.add_header('Authorization', format('Basic {base_64_string}'))\n\n try:\n result = openurl(request, timeout=20)\n response_code = result.getcode()\n if response_code == 200:\n Logger.info(format(\"Successfully updated {user_name} user with role {user_role} in Ranger Admin\"))\n return response_code\n else:\n Logger.error(format(\"Unable to update {user_name} user role with {user_role} in Ranger Admin\"))\n return None\n except urllib2.HTTPError as e:\n raise Fail(\n \"HTTPError while updating \" + str(user_name) + \" user role to \" + str(user_role) + \". Reason = \" + str(\n e.code))\n except urllib2.URLError as e:\n raise Fail(\n \"URLError while updating \" + str(user_name) + \" user role to \" + str(user_role) + \". Reason = \" + str(\n e.reason))\n except TimeoutError:\n raise Fail(\"Connection timeout error while updating \" + str(user_name) + \" user role to \" + str(user_role))\n except Exception as err:\n raise Fail(format(\"Error while updating {user_name} user role to {user_role}. 
Reason = {err}\"))", "def roles(self, roles):\n\n self._roles = roles", "def roles(self, roles):\n\n self._roles = roles", "def roles(self, roles):\n\n self._roles = roles", "def _overrideRole(self, newRole, args):\n oldRole = args.get('role', None)\n args['role'] = newRole\n return oldRole", "def update(self):\n raise NotImplementedError", "def update_role(self):\n all_leader = []\n user_records = self.info\n per = Persons()\n for record in user_records:\n if record['leader'] not in all_leader:\n all_leader.append(record['leader'])\n # print len(all_leader)\n # print all_leader\n for leader in all_leader:\n # print leader\n fil = per.get_one({'dn':leader})\n # print fil\n if fil is None:\n print 'this leader %s is not in our db,please check' % leader\n else:\n per.update_one({'dn':leader},{'role':'leader'})", "def set_keystone_v3_role(self, role_id, role_new_name):\n LOG_OBJ.debug(\"Creating the role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _role_info = {\"role\": {\n \"name\": role_new_name}}\n _body = json.dumps(_role_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the role\")\n print (\"No response from Server while set the role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def update_user():", "def update_forum_role(course_id, user, rolename, action):\r\n role = Role.objects.get(course_id=course_id, name=rolename)\r\n\r\n if action == 'allow':\r\n role.users.add(user)\r\n elif action == 'revoke':\r\n role.users.remove(user)\r\n else:\r\n raise ValueError(\"unrecognized action '{}'\".format(action))", "async def editrole(self, ctx: context.CustomContext, *, role: Fuzzy[Selfrole]):\n\n new_join_message = await ctx.input(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with the new join message for `{role.role.name}`.\"\n f\"\\n{config.HINT} The current join message is: `{role.join_message}`\"\n )\n\n await self.bot.db.execute(\n \"UPDATE selfrole SET join_message = $1 WHERE role_id = $2\",\n new_join_message,\n role.role.id,\n )\n\n await ctx.send(\n f\"{config.YES} The join message for `{role.role.name}` was updated.\"\n )", "def update(self, reward):\n raise NotImplementedError", "def modify(self, context: Context):\n data = attr.asdict(self)\n\n yield self\n self.last_author = context.user.nickname\n self.updated_at = pendulum.now()\n\n try:\n attr.validate(self)\n except TypeError:\n logger.warning(\"failed to validate {}\", self)\n for key, value in data.items():\n # since we can't overwrite self we will need to roll back one by one\n setattr(self, key, value)\n raise", "def update(self, data):\n # TODO: try not to use setattr\n for key, item in data.items():\n if key == \"password\":\n new_password = self.__generate_hash(item)\n setattr(self, key, new_password)\n else:\n setattr(self, key, item)\n\n super().update(data)\n db.session.commit()", "def update(self, **data):\n 
for attribute in data:\n if hasattr(self, attribute):\n setattr(self, attribute, data[attribute])\n if \"password\" in data:\n self.password = data[\"password\"]", "def update(cls, db):\n ret = db.query(cls).filter(cls.id == 1).first()\n if ret is None:\n ret = HPCAccountUpdatesORM(id=1)\n else:\n ret.last_update = utcnow()\n db.add(ret)\n db.commit()", "def update(self, uid):\n raise NotImplementedError", "async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def update(self, *args, **kwargs):\n if args:\n self.__update(*args)\n elif kwargs:\n self.__update(**kwargs)", "def update(self, request, *args, **kwargs):\n return super(UserViewSet, self).update(request, *args, **kwargs)", "def update(self):\n raise NotImplementedError()", "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-layout', {'key': self.KEY}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': self.KEY})\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def on_update(self):\n\t\tusers = frappe.get_all('User', filters={'role_profile_name': self.name})\n\t\troles = [role.role for role in self.roles]\n\t\tfor d in users:\n\t\t\tuser = frappe.get_doc('User', d)\n\t\t\tuser.set('roles', [])\n\t\t\tuser.add_roles(*roles)", "def set_role(userid, role, group, request=None):" ]
[ "0.75100726", "0.73120785", "0.6926027", "0.6725893", "0.6725881", "0.66784096", "0.6674918", "0.6656022", "0.65352", "0.6417849", "0.64106894", "0.62678677", "0.62678677", "0.6230835", "0.6226089", "0.6220385", "0.6199606", "0.6199606", "0.6199606", "0.6171208", "0.6162495", "0.61129594", "0.61129594", "0.6050665", "0.5992274", "0.59666497", "0.59116685", "0.58885", "0.58885", "0.58885", "0.5886721", "0.5863131", "0.5863131", "0.5849777", "0.5784674", "0.5775934", "0.57486767", "0.57388544", "0.5727185", "0.5717251", "0.5709636", "0.570921", "0.57080257", "0.56938994", "0.5675271", "0.56621826", "0.5655654", "0.56531906", "0.56453866", "0.56306005", "0.5625035", "0.5621523", "0.56129295", "0.560743", "0.5594735", "0.55929196", "0.5584783", "0.55759096", "0.55759096", "0.5568666", "0.55631196", "0.5558675", "0.5550692", "0.5522312", "0.55182797", "0.5512526", "0.55091345", "0.55091345", "0.54989064", "0.5496324", "0.54941636", "0.5489804", "0.54830277", "0.54818416", "0.54758084", "0.5470839", "0.5458202", "0.5458202", "0.5458202", "0.54541683", "0.5454009", "0.5450311", "0.5441402", "0.54409844", "0.5440496", "0.54374737", "0.54354787", "0.543546", "0.54267275", "0.54141074", "0.54072785", "0.5395139", "0.5394282", "0.5376106", "0.5375187", "0.5374395", "0.53726107", "0.53649855", "0.5362321", "0.5351425" ]
0.53856415
93
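For context, the positive document in the row above simply folds the permission argument into kwargs and delegates. A self-contained sketch of that behavior follows; the Role class and the update_instance stub are assumptions for illustration, not part of the dataset:

class Role:
    def update_instance(self, **kwargs):
        # Stub standing in for the real client call that persists the update.
        return kwargs

    def update(self, permission, **kwargs):
        # Mirrors the row's document: merge permission into kwargs, then delegate.
        kwargs['permission'] = permission
        return self.update_instance(**kwargs)

assert Role().update('admin', name='ops') == {'permission': 'admin', 'name': 'ops'}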
Delete a given Role
def delete(self, sid):
    return self.delete_instance(sid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_role(role):\n fallback = Role.load_cli_user()\n\n def _del(cls, col):\n pq = db.session.query(cls)\n pq = pq.filter(col == role.id)\n\n def _repo(cls, col):\n pq = db.session.query(cls).filter(col == role.id)\n pq.update({col: fallback.id}, synchronize_session=False)\n\n _del(Permission, Permission.role_id)\n db.session.delete(role)\n db.session.commit()", "def delete_token_role(self, role):\n return self.delete('auth/token/roles/{0}'.format(role))", "def delete_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover", "def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))", "def deleteRole(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self, role_id):\n self.client.delete_role(role_id)", "def role_delete(\n login_manager: LoginManager, *, role_id: str, endpoint_id: uuid.UUID\n) -> None:\n transfer_client = login_manager.get_transfer_client()\n res = transfer_client.delete_endpoint_role(endpoint_id, role_id)\n display(res, text_mode=TextMode.text_raw, response_key=\"message\")", "def delete_role(id):\r\n check_admin()\r\n\r\n role = Role.query.get_or_404(id)\r\n db.session.delete(role)\r\n db.session.commit()\r\n flash('You have successfully deleted the role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_roles'))\r\n\r\n return render_template(title=\"Delete Role\")", "def delete(self, app, role, privilege):\n \n # check user's privileges\n h.checkAccess('delete')\n\n model = RolesModel()\n model.deletePrivilege( app, role, privilege )\n\n # returns empty reply", "async def on_guild_role_delete(self, role):\n channel = self.client.get_channel(serverlogs.getChannel(role.guild.id, \"roles\"))\n if channel is not None:\n await self.log_role(role=role, type='Delete', channel=channel, guild=role.guild)", "def delete_role(id):\n\tcheck_admin()\n\trole = Role.query.get_or_404(id)\n\tdb.session.delete(role)\n\tdb.session.commit()\n\tflash(\"You have successfully deleted the role from the database\")\n\n\t#redirect to the roles page\n\treturn redirect(url_for('admin.list_roles'))\n\n\treturn render_template(title = \"Delete Role\")", "def delete_role(role_id):\n\tsession = get_session()\n\tsession.delete(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id))", "def test_delete_role(self):\n pass", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "def delete_role(self, role_name: str) -> None:\n session = self.get_session\n role = session.query(Role).filter(Role.name == role_name).first()\n if role:\n log.info(\"Deleting role '%s'\", role_name)\n session.delete(role)\n session.commit()\n else:\n raise AirflowException(f\"Role named '{role_name}' does not exist\")", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def deleteUserRole(self, 
name, role):\n self._client.deleteUserRole(name, role)", "def delete(profile, name):\n # Make sure the role exists.\n if not exists(profile, name):\n msg = \"No role '\" + str(name) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Now try to delete it.\n params = {}\n params[\"profile\"] = profile\n params[\"role\"] = name\n response = utils.do_request(role_lib, \"delete\", params)\n\n # Check that it was, in fact, deleted.\n if exists(profile, name):\n msg = \"The role '\" + str(name) + \"' was not deleted.\"\n raise ResourceNotDeleted(msg)", "def deleteRolePermission(self, role, _type):\n self._client.deleteRolePermission(role, _type)", "def main_role_delete(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n id_ = opts[\"id\"]\n client.delete_role(opts[\"formation\"], id_)\n logger.info(f\"Deleted role with id=\\\"{id_}\\\"\")", "async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)", "def deleteRoleAccess(self, role, read, write, catalog='*', repository='*'):\n self._client.deleteRoleAccess(role, read, write, catalog, repository)", "def test_delete_namespaced_role(self):\n pass", "def delete_role(resource_root, service_name, name, cluster_name=\"default\"):\n return call(resource_root.delete,\n _get_role_path(cluster_name, service_name, name), ApiRole)", "async def deleterole(self, ctx: context.CustomContext, *, role: str):\n\n try:\n selfrole = await Fuzzy[Selfrole].convert(ctx, role)\n except exceptions.NotFoundError:\n return await ctx.send(\n f\"{config.NO} This server has no selfrole that matches `{role}`.\"\n )\n\n if selfrole.role:\n hard_delete = await ctx.confirm(\n f\"{config.USER_INTERACTION_REQUIRED} Should I also delete the \"\n f\"Discord role `{selfrole.role.name}`, instead of just removing the \"\n f\"selfrole from the list of selfroles in `{config.BOT_PREFIX}roles`?\"\n )\n else:\n hard_delete = False\n\n await self.bot.db.execute(\n \"DELETE FROM selfrole WHERE guild_id = $1 AND role_id = $2\",\n ctx.guild.id,\n selfrole.role.id,\n )\n\n if hard_delete:\n try:\n await selfrole.role.delete()\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.DELETE_ROLE, detail=selfrole.role.name\n )\n\n return await ctx.send(\n f\"{config.YES} The `{role}` selfrole and its Discord role were deleted.\"\n )\n\n await ctx.send(\n f\"{config.YES} The `{role}` selfrole was removed from the `{config.BOT_PREFIX}roles` list but \"\n f\"I did not delete its Discord role.\"\n )", "def _delete(self, uuid):\n path = self.router.roles_by_uuid.format(uuid=uuid)\n return self.request(method=\"delete\", path=path, error_json_invalid=False)", "def delete(self, request, *args, **kwargs):\n return super(RoleDetailAPIView, self).delete(request, *args, **kwargs)", "def removeRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='remove', modelType='role')", "def delete_role(self, name, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/{mount_point}/role/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.delete(\n url=api_path,\n )", "def delete_keystone_v3_role(self, role_id):\n LOG_OBJ.debug(\"Deleting the role.\")\n\n _url = 
\"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting the role\")\n print (\"No response from Server while deleting the role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Deleting role Failed with status %s and error\"\n \" : %s \" % (response.status, response.data))\n print (\" Deleting role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def remove_role(self, role):\n if role.name in [r.name for r in self.roles]:\n remaining_if_any_roles = [r.to_python() for r in self.roles if not r.name == role.name]\n if remaining_if_any_roles:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$set': {'roles': remaining_if_any_roles}})\n else:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$unset': {'roles': 1}})", "def delete(self, id, timeout=None):\n req = RoleDeleteRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Delete(\n req,\n metadata=self.parent.get_metadata('Roles.Delete', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleDeleteResponse()\n resp.meta = plumbing.convert_delete_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def revoke_role(self, role, space=None, project=None, reason='no reason specified'):\n\n techId = None\n if isinstance(role, int):\n techId = role\n else:\n query = { \"role\": role }\n if space is None:\n query['space'] = ''\n else:\n query['space'] = space.code.upper()\n\n if project is None:\n query['project'] = ''\n else:\n query['project'] = project.code.upper()\n\n # build a query string for dataframe\n querystr = \" & \".join( \n '{} == \"{}\"'.format(key, value) for key, value in query.items()\n )\n roles = self.get_roles().df\n if len(roles) == 0:\n if VERBOSE:\n print(f\"Role {role} has already been revoked from person {self.code}\")\n return\n techId = roles.query(querystr)['techId'].values[0]\n\n # finally delete the role assignment\n ra = self.openbis.get_role_assignment(techId)\n ra.delete(reason)\n if VERBOSE:\n print(\n \"Role {} successfully revoked from person {}\".format(role, self.code)\n ) \n return", "def revoke_role(self, role, principal_ids):", "def remove_trainee(role_id):\n\n role = Role.query.get(role_id)\n if role is None or role.role_id != RoleIds.Trainee:\n flash(\"Role invalide\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n if role.activity_type not in current_user.get_supervised_activities():\n flash(\"Non autorisé\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n db.session.delete(role)\n db.session.commit()\n\n return redirect(url_for(\".leader_list\"))", "def delete_role(self, role_name):\n try:\n # In case role doesn't exist\n response = False\n\n Oprint.info('Deleting role {}'.format(role_name), 'iam')\n response = self._client.delete_role(RoleName=role_name)\n Oprint.info('Complete 
deleting role {}'.format(role_name), 'iam')\n except Exception as e:\n Oprint.err(str(e.response['Error']['Message']), 'iam', exit=False)\n\n return response", "def delete_role_and_associated_policies(self, role_name):\n try:\n self.detach_role_managed_policies(role_name)\n self.delete_role_inline_policies(role_name)\n \n response = self.delete_role(role_name)\n except Exception as e:\n Oprint.err(e, 'iam', exit=False)\n\n return response", "async def deleteRole(self, ctx, reason=\"No reason available\"):\n for role in ctx.guild.roles:\n if role.name == self.categoryName:\n try:\n await role.delete(reason=reason)\n except discord.errors.Forbidden:\n self.msgToDelete.append(await ctx.message.channel.send(\n \"Erreur, permission non accordée, la suppression des rôles n'est pas complète.\"))\n print(\"Deleted all roles.\")", "def _remove_role(contest, user, role_class):\n user_biv_id = _lookup_user(user).biv_id\n role = role_class.query.select_from(pam.BivAccess).filter(\n pam.BivAccess.source_biv_id == user_biv_id,\n pam.BivAccess.target_biv_id == role_class.biv_id\n ).one()\n db.session.delete(\n pam.BivAccess.query.filter(\n pam.BivAccess.source_biv_id == contest,\n pam.BivAccess.target_biv_id == role.biv_id\n ).one()\n )", "def remove_role(self, principal, role):\n return permissions.utils.remove_local_role(self, principal, role)", "def test_delete_cluster_role(self):\n pass", "def test_delete_role(self):\n self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 204, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 2)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=self.project, user=self.assign_user\n ).count(),\n 0,\n )", "async def command_unassign_role(self, context, role: str):\n try:\n await context.author.remove_roles(discord.utils.get(context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be unassigned')\n print(f'Errored in command_unassign_role.', e)", "async def rolemenu_remove_role(self, interaction: discord.Interaction,\n name: str, role: str):\n try:\n role_id = int(role)\n except ValueError:\n return await interaction.response.send_message(\n \"The role provided \"\n \"is not valid. 
Make sure that you either select one from the \"\n \"options that the autocomplete provides, or that you \"\n \"provide the role's ID\",\n ephemeral=True)\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"No role menu with that name exists.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n for role_doc in doc[\"roles\"]:\n if role_doc[\"id\"] == role_id:\n break\n else:\n return await interaction.followup.send(\n \"Role not found in that menu\")\n await self.db.update_one({\"_id\": doc[\"_id\"]},\n {\"$pull\": {\n \"roles\": role_doc\n }})\n doc = await self.db.find_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role removed from the menu.\")\n menu = Menu(self, interaction.guild, doc)\n await menu.update()", "async def removerole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n if role.id in excluded_roles:\n excluded_roles.remove(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n await ctx.send(\"Removed %s from role exclusion list.\" % role.name)\n else:\n await ctx.send(\"%s is not an excluded role.\" % role.name)", "async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def remove_role(role_id: int) -> bool:\n role: Role = db.session.query(Role).get(role_id)\n if role is None:\n return False # this role did not exist in the first place\n db.session.delete(role)\n db.session.commit()\n return True", "async def rolemenu_delete(self, interaction: discord.Interaction,\n name: str):\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"Role menu with that name does not exist.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n await self.db.delete_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role menu removed.\", ephemeral=True)", "def _delete_roles(self):\n for role in self.roles:\n role.delete()", "def test_ipam_roles_delete(self):\n pass", "def remove_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Revoke role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "def deleteRoleSecurityFilter(self, role, _type, s=None, p=None, o=None, g=None):\n self._client.deleteRoleSecurityFilter(role, _type, s, p, o, g)", "def delete_station_role(self, station_id, role_id):\n return self._stations_service.delete_station_role(station_id, role_id)", "def test_delete_role_emits_event(self):\n with events.events.disconnect_receivers():\n role = role_service.create('admin')\n spy = mock.Mock()\n events.role_deleted_event.connect(spy, weak=False)\n role_service.delete(role)\n spy.assert_called_with(role)", "def removeRole(self, role_id, REQUEST=None):\n for principal_id in self._principal_roles.keys():\n self.removeRoleFromPrincipal(role_id, principal_id)\n\n del self._roles[role_id]", "async def erase(self, guild: discord.Guild):\n role = await self.get_role(guild=guild)\n 
if role:\n await role.delete()", "def test_remove_role_from_project_member(self):\n pass", "def remove_role(self, rolename):\n params = {\n \"f\" : \"json\",\n \"rolename\" : rolename\n }\n uURL = self._url + \"/roles/remove\"\n return self._con.post(path=uURL,\n postdata=params)", "def delete_implied_role(self, prior_role_id, implied_role_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_delete_namespaced_role_binding(self):\n pass", "def delete_node_role_dimension(session, dimension):\n # type: (Session, str) -> None\n if not session.network:\n raise ValueError(\"Network must be set to delete a node role dimension\")\n if not dimension:\n raise ValueError(\"Dimension must be a non-empty string\")\n url_tail = \"/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_NODE_ROLES,\n dimension,\n )\n return _delete(session, url_tail)", "def remove(self, user_id, role=None, roles=None):\n\n if role:\n params = {\n 'roles': [role]\n }\n elif roles:\n params = {\n 'roles': roles\n }\n\n route = '/openstack/users/%s/roles'\n url = route % (user_id)\n try:\n self._delete(url, json=params)\n except exc.HTTPBadRequest as e:\n print(e.message)\n return False\n\n return True", "def delete(self, id, timeout=None):\n req = RoleGrantDeleteRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Delete(\n req,\n metadata=self.parent.get_metadata('RoleGrants.Delete',\n req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleGrantDeleteResponse()\n resp.meta = plumbing.convert_delete_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def deregister_role(self, ctx, role_title: str, guild_id: int = None):\n\n if guild_id is None:\n guild = ctx.guild\n guild_id = guild.id\n else:\n guild = self.bot.get_guild(guild_id)\n if guild is None:\n await ctx.send(\"Couldn't find the guild provided.\")\n return\n\n role_key = \"guild:{}:roles:roles:{}\".format(guild_id, 
role_title.lower())\n if self.config.exists(role_key):\n self.config.remove(role_key)\n self.config.hdel(\"guild:{}:roles:all:names\".format(guild_id), role_title)\n await ctx.send(\"Role '{}' was deregistered.\".format(role_title))\n else:\n await ctx.send(\"The given role doesn't exist in the specified guild.\")", "def remove_role(self, name):\n role = Role.by_name(name)\n if not role:\n return\n if role in self.roles:\n self.roles.remove(role)", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','roles'):\n abort(403)", "def delete_namespaced_role(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_role`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `delete_namespaced_role`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_role`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/roles/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def delete_station_role_resource_policy(self, station_id, role_id):\n return self._stations_service.delete_station_role_resource_policy(\n station_id, role_id\n )", "def remove_keystone_v3_role_from_user_or_group(self, user_id,\n domain_id, role_id):\n LOG_OBJ.debug(\"Removing the role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + \\\n str(domain_id) + \"/users/\" + str(user_id) + \"/roles/\" + \\\n str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No 
response from Server while removing role\")\n print (\"No response from Server while removing role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Removing role Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\"Removing role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def delete(self, *args, **kwargs):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = False\n self.status = User.STATUS_DELETED\n self.save()\n else:\n self.is_active = False\n self.status = User.STATUS_DELETED\n self.save()", "async def unset(self, ctx, *, role_name: str):\n role_name = role_name.lower()\n\n if isinstance(ctx.message.channel, discord.DMChannel):\n guild = await self.get_server_from_pm(ctx)\n else:\n guild = ctx.guild\n\n if guild is None:\n return\n\n await self.remove_role(ctx, role_name, guild)", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "def removeInheritedRole(self, role=None, roleName=None, kvDict=None):\n\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role', inherit=True)", "async def removepersistrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removepersistrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n persistent_role = Roles(\n bot=self.bot,\n guild_id=ctx.guild.id,\n user_id=member.id,\n roles=role.id,\n )\n # Post to db for persistent role\n await persistent_role.delete()\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"Persisting Role *{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = 
discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def remove_role(profile, instance_profile, role):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n params[\"RoleName\"] = role\n return client.remove_role_from_instance_profile(**params)", "def test_delete_cluster_role_binding(self):\n pass", "def remove_role(user_id):\n role = roles.get_or_404(int(request.values.get('role_id', None)))\n user = users.get_or_404(user_id)\n if not users.remove_role_from_user(user, role):\n return {}, 500\n return {}", "def remove_permission_from_bucket(bucket_name, role_type, member_type):\n\n # initialize client & get bucket\n _, bucket, _ = create_client(bucket_name)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n \n # get member type\n member_value = get_member_bucket_level(member_type)\n\n # get role type\n role_value = get_role_bucket_level(role_type)\n\n for binding in policy.bindings:\n # print(binding)\n if binding[\"role\"] == role_value and binding.get(\"condition\") is None:\n # revoke role from member\n binding[\"members\"].discard(member_value)\n\n bucket.set_iam_policy(policy)\n\n print(\"removed {} with role {} from {}\".format(member_value, role_value, bucket_name))", "def test_delete_delegate_unauthorized(self):\n new_user = self.make_user('new_user')\n delegate_as = self.make_assignment(\n self.project, new_user, self.role_delegate\n )\n self.assertEqual(RoleAssignment.objects.count(), 4)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': delegate_as.sodar_uuid},\n )\n # NOTE: Perform record as contributor user\n token = self.get_token(self.assign_user)\n response = self.request_knox(url, method='DELETE', token=token)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 4)", "def remove_permission_from_role(self, role: Role, permission: Permission) -> None:\n if permission in role.permissions:\n try:\n role.permissions.remove(permission)\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_DEL_PERMROLE.format(permission, role.name))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_DEL_PERMROLE.format(e))\n self.get_session.rollback()", "def test_delete_namespaced_role_binding_restriction(self):\n pass", "def role_deassign(user_id, role_id):\n user = _get_user_or_404(user_id)\n role = _get_role_or_404(role_id)\n initiator_id = g.user.id\n\n authorization_service.deassign_role_from_user(\n 
role.id, user.id, initiator_id=initiator_id\n )\n\n flash_success(\n gettext(\n '%(role_title)s has been withdrawn from \"%(screen_name)s\".',\n screen_name=user.screen_name,\n role_title=role.title,\n )\n )", "def remove_user_from_role(request, username_or_email, role, group_title, event_name):\r\n\r\n username_or_email = strip_if_string(username_or_email)\r\n try:\r\n user = _user_from_name_or_email(username_or_email)\r\n except User.DoesNotExist:\r\n return u'<font color=\"red\">Error: unknown username or email \"{0}\"</font>'.format(username_or_email)\r\n\r\n role.remove_users(user)\r\n\r\n # Deal with historical event names\r\n if event_name in ('staff', 'beta-tester'):\r\n track.views.server_track(\r\n request,\r\n \"add-or-remove-user-group\",\r\n {\r\n \"event_name\": event_name,\r\n \"user\": unicode(user),\r\n \"event\": \"remove\"\r\n },\r\n page=\"idashboard\"\r\n )\r\n else:\r\n track.views.server_track(request, \"remove-instructor\", {\"instructor\": unicode(user)}, page=\"idashboard\")\r\n\r\n return '<font color=\"green\">Removed {0} from {1}</font>'.format(user, group_title)", "async def fulldelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(\r\n f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete role: {role}\"\r\n )\r\n for channel in ctx.guild.channels:\r\n try:\r\n await channel.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]CHANNEL => {Fore.RESET}Failed to delete: {channel}\")", "def remove_permission(self, role, permission):\n return permissions.utils.remove_permission(self, role, permission)", "def revoke_role_from_user_on_project(self, project_id, user_id, role_id):\n resp, body = self.delete('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id))\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "async def remove_roles(self, ctx: commands.Context, *roles: discord.Role):\n if not roles:\n return await ctx.send_help()\n message = \"\"\n removed = []\n not_found = []\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n for role in roles:\n if role.id in roles_list:\n roles_list.remove(role.id)\n removed.append(role.name)\n else:\n not_found.append(role.name)\n if not_found:\n message += \"\\nRole(s) not found in autorole list: {roles}\".format(\n roles=humanize_list(not_found)\n )\n if removed:\n message += \"\\nRole(s) remove from autorole list: {roles}\".format(\n roles=humanize_list(removed)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")", "def deserialize_guild_role_delete_event(\n self,\n shard: gateway_shard.GatewayShard,\n payload: data_binding.JSONObject,\n *,\n old_role: typing.Optional[guild_models.Role],\n ) -> role_events.RoleDeleteEvent:", "def detach(profile, instance_profile, role):\n # Make sure the instance profile exists.\n if not exists(profile, instance_profile):\n msg = \"No instance profile '\" + str(instance_profile) + 
\"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Make sure the role exists.\n if not role_jobs.exists(profile, role):\n msg = \"No role '\" + str(role) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Detach the role\n params = {}\n params[\"profile\"] = profile\n params[\"instance_profile\"] = instance_profile\n params[\"role\"] = role\n return utils.do_request(instanceprofile, \"remove_role\", params)", "def remove_role_from_user(self, user, role):\n rv = False\n user, role = self._prepare_role_modify_args(user, role)\n if role in user.roles:\n rv = True\n user.roles.remove(role)\n # noinspection PyUnresolvedReferences\n self.save(user)\n return rv", "def test_delete_owner(self):\n self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.owner_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 3)", "async def removeroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removeroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def removeRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.remove_roles(*roles)\n await ctx.send(f\"Removing {roles_str(person, roles)}\")", "def revoke_role_from_group_on_project(self, project_id, group_id, role_id):\n resp, body = self.delete('projects/%s/groups/%s/roles/%s' %\n (project_id, group_id, role_id))\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "def role_absent(name, profile=None, **connection_args):\n ret = {\n \"name\": name,\n \"changes\": {},\n \"result\": True,\n \"comment\": 'Role \"{}\" is already absent'.format(name),\n }\n\n # Check if role is present\n role = __salt__[\"keystone.role_get\"](name=name, profile=profile, **connection_args)\n if \"Error\" not in role:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Role \"{}\" will be deleted'.format(name)\n return ret\n # Delete role\n __salt__[\"keystone.role_delete\"](name=name, profile=profile, **connection_args)\n ret[\"comment\"] = 'Role \"{}\" has been deleted'.format(name)\n ret[\"changes\"][\"Role\"] = \"Deleted\"\n\n return ret", "def delete_company(id):\n\n companyss = Company_list.query.get_or_404(id)\n db.session.delete(companyss)\n db.session.commit()\n flash('You have successfully deleted the company.')\n\n # redirect to the roles page\n return redirect(url_for('main.company_namelist'))", "def revoke_role_from_user_on_domain(self, domain_id, 
user_id, role_id):\n resp, body = self.delete('domains/%s/users/%s/roles/%s' %\n (domain_id, user_id, role_id))\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "async def afterHoursRemoveRole(self, ctx: Context):\n # check if after hours role is set\n roleid = await self.config.guild(ctx.guild).get_attr(KEY_ROLE_ID)()\n if roleid is None:\n await ctx.send(\"Please configure the after-hours role first!\")\n return\n # get after hours role by id\n role = ctx.guild.get_role(roleid)\n # if id is no longer valid (role deleted most likely)\n if role is None:\n await ctx.send(\n \"After Hours role no longer valid, most likely role was deleted by admins\"\n )\n return\n\n # check if user has roles\n rolesList = ctx.author.roles\n if role not in rolesList:\n await ctx.send(f\"You do not have the role {role.name}\")\n return\n # remove role\n try:\n await ctx.author.remove_roles(role, reason=\"User removed role\")\n except discord.Forbidden:\n self.logger.error(\"Not allowed to remove role\", exc_info=True)\n except discord.HTTPException:\n self.logger.error(\"HTTP Exception\", exc_info=True)\n\n # post message saying role removed\n await ctx.send(f\"Removed the role {role.name} from you.\")" ]
[ "0.82603055", "0.8098692", "0.80856603", "0.80064636", "0.79942334", "0.79914016", "0.791367", "0.7900632", "0.78198117", "0.779961", "0.778778", "0.7761136", "0.77352875", "0.76953554", "0.76781493", "0.76682544", "0.76425016", "0.7444573", "0.7420755", "0.7399109", "0.7398379", "0.7391509", "0.7222078", "0.72062695", "0.7179278", "0.7175388", "0.71622443", "0.71616524", "0.7072597", "0.70612377", "0.6999234", "0.69935834", "0.69914633", "0.6974879", "0.6948077", "0.69141203", "0.68647194", "0.67883587", "0.67712986", "0.6738924", "0.67335016", "0.67271405", "0.6709582", "0.6706644", "0.66758966", "0.66628647", "0.66614664", "0.6640332", "0.6628782", "0.66148335", "0.66077185", "0.6572497", "0.65345114", "0.6520824", "0.6508134", "0.6504373", "0.6454494", "0.64440805", "0.64004576", "0.63870597", "0.6372547", "0.6347514", "0.6338038", "0.6323471", "0.6316548", "0.63142866", "0.6312792", "0.6286104", "0.6276302", "0.62538666", "0.6242574", "0.6171362", "0.6169964", "0.61591446", "0.61401767", "0.61360216", "0.6134432", "0.6099427", "0.6089729", "0.6058965", "0.6050645", "0.60477966", "0.60152966", "0.6010337", "0.6004193", "0.5985476", "0.59849244", "0.5982487", "0.59640926", "0.5944025", "0.5922394", "0.59001964", "0.5886655", "0.58587813", "0.58394676", "0.5828844", "0.5824503", "0.5823042", "0.58194864", "0.5774306", "0.57733035" ]
0.0
-1
Updates the Role instance identified by sid
def update(self, sid, permission, **kwargs):
    kwargs['permission'] = permission
    return self.update_instance(sid, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, role):\n model = models.load('Role', role)\n model.account_id = self.account_id\n\n return self.client.update_role(model)", "def update_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover", "def updateRole(self, role_id, title, description):\n self._roles[role_id].update({'title': title,\n 'description': description})", "def update(self, role):\n self._router_request(\n self._make_request_data(\n 'updateAdminRole',\n data=dict(\n params=dict(\n uid=self.uid,\n name=self.name,\n role=role\n )\n )\n )\n )\n\n self.role = role\n\n return True", "def put(self, id):\n data = request.json\n role = Role.query.filter(Role.id == id).one()\n if 'description' in data:\n role.description = data.get('description')\n if 'name' in data:\n role.name = data.get('name')\n db.session.add(role)\n db.session.commit()\n return None, 204", "def manage_updateRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n self.updateRole(role_id, title, description)\n\n message = 'Role+updated'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?role_id=%s&'\n 'manage_tabs_message=%s' %\n (self.absolute_url(), role_id, message))", "def update_role(self, role_id, name: str) -> Role | None:\n role = self.get_session.get(self.role_model, role_id)\n if not role:\n return None\n try:\n role.name = name\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_UPD_ROLE.format(role))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_UPD_ROLE.format(e))\n self.get_session.rollback()\n return None\n return role", "def updateRole(role_name):\n\n if role_name == 'gsoc_mentor':\n updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')\n elif role_name == 'gsoc_org_admin':\n updater = RoleUpdater(\n GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')\n elif role_name == 'gsoc_student':\n updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')\n\n updater.run()\n return http.HttpResponse(\"Ok\")", "def edit_role(role_id, new_name=None, new_arn=None):\n\tsession = get_session()\n\told_data = get_role(role_id)\n\tdata = {}\n\tdata[\"name\"] = new_name or old_data[\"name\"]\n\tdata[\"arn\"] = new_arn or old_data[\"arn\"]\n\tresponse = session.put(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id), json=data)\n\treturn response.json()", "def changeRole(self, node, role):", "def update(self, role, timeout=None):\n req = RoleUpdateRequest()\n\n if role is not None:\n req.role.CopyFrom(plumbing.convert_role_to_plumbing(role))\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Update(\n req,\n metadata=self.parent.get_metadata('Roles.Update', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleUpdateResponse()\n resp.meta = plumbing.convert_update_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.role = plumbing.convert_role_to_porcelain(plumbing_response.role)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "async def save(self):\n await config.member(self.member).set_raw(str(self.role.id), value=self.as_dict)", "def update_role(self, rolename, description):\n params = {\n \"f\" : \"json\",\n \"rolename\" : rolename\n }\n if description is not None:\n params['description'] = 
description\n uURL = self._url + \"/roles/update\"\n return self._con.post(path=uURL, postdata=params)", "def _update(self, uuid, name, permissions):\n data = {\"name\": name, \"permissions\": permissions, \"uuid\": uuid}\n path = self.router.roles_by_uuid.format(uuid=uuid)\n return self.request(\n method=\"post\", path=path, json=data, error_json_invalid=False\n )", "def changeRoleInfo(self, role, info):", "def updateRoles(request):\n\n # update org admins\n #updateRole('gsoc_org_admin')\n\n # update mentors\n #updateRole('gsoc_mentor')\n\n # update students\n # we can assume that students cannot have any other roles, so we do not\n # need to set ETA\n updateRole('gsoc_student')", "def save(self):\n body = {}\n body[\"permissions\"] = dict(self.permissions)\n body[\"name\"] = self.name\n body[\"description\"] = self.description\n _, role = self._requestor.patch('/roles/' + self._id, json=body)\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])", "def update_forum_role(course_id, user, rolename, action):\r\n role = Role.objects.get(course_id=course_id, name=rolename)\r\n\r\n if action == 'allow':\r\n role.users.add(user)\r\n elif action == 'revoke':\r\n role.users.remove(user)\r\n else:\r\n raise ValueError(\"unrecognized action '{}'\".format(action))", "def role(self, role):\n\n self._role = int(role)", "def updateStudents(request):\n\n return updateRole('gsoc_student')", "def set_keystone_v3_role(self, role_id, role_new_name):\n LOG_OBJ.debug(\"Creating the role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _role_info = {\"role\": {\n \"name\": role_new_name}}\n _body = json.dumps(_role_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the role\")\n print (\"No response from Server while set the role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def update_roles_by_id(self, role_ids):\n return self.update_supergroups_by_id(role_ids, 'role')", "def updatesid(dn, sid, l):\n mod_attrs = [(ldap.MOD_REPLACE, 'sambaSID', sid )]\n l.modify_s(dn, mod_attrs)", "def modify_role():\n\n id_hash = request.args.get('id')\n\n if not id_hash or id_hash=='':\n flash('There is no id.','error')\n return redirect(url_for('user_ksat.manage_role'))\n #Localizamos el role y luego lo modificamos\n modify_role = Role.query.filter_by(id=hashids_hasher.decode(id_hash)).first()\n\n if not modify_role:\n flash('There is no role.','error')\n return redirect(url_for('user_ksat.manage_role'))\n\n role_form = RoleForm(name=modify_role.name)\n\n if role_form.validate_on_submit():\n new_name = request.form['name']\n if not new_name or new_name == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_role.html', title='Modify Role',form=role_form)\n else:\n \n modify_role.name = new_name\n\n try:\n correct = True\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n 
# Cleanup and show error\n db.session.rollback()\n flash('Error when modifying a Role.','error')\n else:\n flash(\"Our Role was modified!\",\"success\")\n return redirect(url_for('user_ksat.manage_role'))\n\n return render_template('user/add_edit_role.html', title='Role',form=role_form)", "async def put(\n self, user_id: str, /, data: UpdateAdministratorRoleRequest\n ) -> Union[r200[UserResponse], r404]:\n\n if user_id == self.request[\"client\"].user_id:\n raise HTTPBadRequest(text=\"Cannot change own role\")\n\n try:\n administrator = await get_data_from_req(\n self.request\n ).administrators.set_administrator_role(user_id, data.role)\n except ResourceNotFoundError:\n raise NotFound()\n\n return json_response(administrator, status=200)", "def __setRole(self, session):\r\n self.__role = session.role\r\n if self._config.has_key('purpose'):\r\n co_role = ccm.get_role_for_purpose(session, self._config['purpose'])\r\n _logger.info(\"Switching user to role: %s\" % co_role)\r\n session.role = co_role\r\n _logger.info(\"Switched user to role: %s\" % session.role)", "def updateRoles(self, obj, dom):\n domRoles = self.validateRoles(self.getRolesFromDOM(dom))\n moduleRoles = self.validateRoles(self.getRolesFromModule(obj))\n\n updateRoles = {}\n deleteUsers = []\n cancelRoles = []\n \n if self.action == 'create' or self.update_semantics == 'replace':\n # set default roles only if the dom contains no roles\n if len(domRoles.keys()) == 0:\n updateRoles = self.getDefaultRoles(\n self.pmt.getAuthenticatedMember().getId())\n else:\n updateRoles.update(domRoles)\n\n elif self.update_semantics == 'merge':\n updateRoles.update(moduleRoles)\n for role, userids in domRoles.items():\n userids = set(userids)\n userids.union(updateRoles.get(role, []))\n updateRoles[role] = list(userids)\n\n elif self.update_semantics == 'replace':\n currentUsers = set()\n for userids in moduleRoles.values():\n currentUsers.update(userids)\n domUsers = set()\n for userids in domRoles.values():\n domUsers.update(userids)\n for userids in updateRoles.values():\n domUsers.update(userids)\n deleteUsers = currentUsers.difference(domUsers)\n\n # XXX: Workaround for bug in generateCollaborationRequests that\n # requires a user listed in deleteRoles to be present in\n # newRoles\n for role, userids in moduleRoles.items():\n for user in deleteUsers:\n if user in userids:\n updateRoles.setdefault(role, [])\n updateRoles[role].append(user)\n\n self._updateRoles(obj, updateRoles, deleteUsers, cancelRoles)", "async def setoperator(self, ctx, role_id: int, perms: int):\n s = db.session()\n role = s.query(db.AdminRole).filter(db.AdminRole.role_id == role_id).first()\n if role:\n if perms == 0:\n s.delete(role)\n else:\n role.perms = perms\n else:\n s.add(db.AdminRole(role_id=role_id, perms=perms))\n s.commit()\n s.close()\n await ctx.send(\"Role set\")", "def sid(self, sid):\n self._sid = sid", "async def update(self, ctx):\n\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n # fetch the discord message\n guild_id = await self.get_deep_data(data, \"bot__server__uid\")\n\n guild = self.get_guild(int(guild_id))\n channel = self.get_channel(guild, int(data.message.cuid))\n message = await channel.fetch_message(int(data.message.uid))\n\n # update the message\n await message.edit(content=\"_ _\", embed=self.create_message_embed(data))\n\n await self.update_reactions(message, data)\n\n await 
ctx.send(\"Updated.\")", "def set_role(self, user, role):\n obj = self._get_through_object(user)\n obj.role = role if isinstance(role, int) else obj.ROLE_MAP_REV[role]\n obj.save()", "def role(self, role):\n\n self._role = role", "def role(self, role):\n\n self._role = role", "def update_user_role(ranger_url, user_name, user_role, admin_username_password):\n url = format(\"{ranger_url}/service/xusers/secure/users/roles/userName/{user_name}\")\n\n role = {\n \"vXStrings\": [{\"value\": user_role}]\n }\n\n base_64_string = base64.encodestring(admin_username_password).replace('\\n', '')\n\n request = urllib2.Request(url, json.dumps(role))\n request.get_method = lambda: 'PUT'\n request.add_header('Content-Type', 'application/json')\n request.add_header('Accept', 'application/json')\n request.add_header('Authorization', format('Basic {base_64_string}'))\n\n try:\n result = openurl(request, timeout=20)\n response_code = result.getcode()\n if response_code == 200:\n Logger.info(format(\"Successfully updated {user_name} user with role {user_role} in Ranger Admin\"))\n return response_code\n else:\n Logger.error(format(\"Unable to update {user_name} user role with {user_role} in Ranger Admin\"))\n return None\n except urllib2.HTTPError as e:\n raise Fail(\n \"HTTPError while updating \" + str(user_name) + \" user role to \" + str(user_role) + \". Reason = \" + str(\n e.code))\n except urllib2.URLError as e:\n raise Fail(\n \"URLError while updating \" + str(user_name) + \" user role to \" + str(user_role) + \". Reason = \" + str(\n e.reason))\n except TimeoutError:\n raise Fail(\"Connection timeout error while updating \" + str(user_name) + \" user role to \" + str(user_role))\n except Exception as err:\n raise Fail(format(\"Error while updating {user_name} user role to {user_role}. Reason = {err}\"))", "def update(request, role_id):\n\n role = get_object_or_404(ProjectRole, pk=role_id)\n\n # require permission to proceed\n must_have_permission(request.user, role.project, \"can_edit_roles\")\n\n permittee = Permittee.objects.get_as_permittee(request.user)\n\n initial_set = list(role.obj_permissions.values_list(\"pk\", flat=True))\n\n # Get the permissions that the user can delegate to others as well\n # as the ones that are already in the role. 
Obtain DISTINCT values.\n obj_permissions = ObjectPermission.objects.filter_from_instance(\n role.project).filter(\n Q(permissionownership__permittee=permittee,\n permissionownership__can_delegate=True) |\n Q(id__in=initial_set)\n ).distinct()\n\n project_url = reverse(\"project_detail\", args=[role.project.id])\n\n # Use to update the permissions in the ProjectRole object so\n # users with that role are affected from the time this is updated\n def post_save(instance, created):\n from expedient.clearinghouse.roles.models import ObjectPermission\n new_obj_permissions_pks = [ p.pk for p in instance.obj_permissions.all() ]\n for permission in obj_permissions:\n # Add and delete permissions accordingly...\n try:\n instance.remove_permission(permission)\n except:\n pass\n if permission.pk in new_obj_permissions_pks:\n instance.add_permission(permission)\n\n return generic_crud(\n request,\n obj_id=role_id,\n model=ProjectRole,\n template=TEMPLATE_PATH+\"/update.html\",\n redirect=lambda instance: project_url,\n template_object_name=\"role\",\n form_class=ProjectRoleForm,\n extra_form_params={\n \"obj_permissions\": obj_permissions,\n },\n extra_context={\n \"project\": role.project,\n \"breadcrumbs\": (\n (\"Home\", reverse(\"home\")),\n (\"Project %s\" % role.project.name, project_url),\n (\"Update Role %s\" % role.name, request.path),\n )\n },\n post_save = post_save,\n )", "def test_ipam_roles_update(self):\n pass", "def update_station_member(self, station_id, userid, role_id):\n return self._stations_service.update_station_member(station_id, userid, role_id)", "def change_user_role(username, new_role):\n user_connector.change_user_role(username, new_role)", "def _overrideRole(self, newRole, args):\n oldRole = args.get('role', None)\n args['role'] = newRole\n return oldRole", "async def reacrole(self, ctx: commands.Context):\n pass", "def update(self, obj):\n if isinstance(obj, EventUser): # Need to convert list to string before storing in db\n obj.roles = str(obj.roles)\n self.s.commit()\n return obj", "def test_user_id_role_put(self):\n pass", "def update_station_role(\n self,\n station_id,\n station_role_id,\n name=None,\n description=None,\n protected_role=None,\n edit_station_roles=None,\n assign_user_roles=None,\n assign_protected_user_roles=None,\n launch_jobs=None,\n invite_users=None,\n remove_all_users=None,\n remove_invited_users=None,\n view_all_users=None,\n edit_metadata=None,\n add_lz=None,\n remove_any_lz=None,\n view_all_jobs=None,\n control_all_jobs=None,\n view_jobs_on_own_lzs=None,\n control_jobs_on_own_lzs=None,\n view_own_jobs=None,\n control_own_jobs=None,\n view_complete_activity=None,\n edit_station_policy=None,\n edit_own_lz_policy=None,\n edit_lz_policy=None,\n edit_user_policy=None,\n edit_job_resource_limits=None,\n manage_volumes=None,\n reject_user_requests=None\n ):\n request = CreateStationRoleRequest(\n name,\n description,\n protected_role,\n edit_station_roles,\n assign_user_roles,\n assign_protected_user_roles,\n launch_jobs,\n invite_users,\n remove_all_users,\n remove_invited_users,\n view_all_users,\n edit_metadata,\n add_lz,\n remove_any_lz,\n view_all_jobs,\n control_all_jobs,\n view_jobs_on_own_lzs,\n control_jobs_on_own_lzs,\n view_own_jobs,\n control_own_jobs,\n view_complete_activity,\n edit_station_policy,\n edit_own_lz_policy,\n edit_lz_policy,\n edit_user_policy,\n edit_job_resource_limits,\n manage_volumes,\n reject_user_requests\n )\n return self._stations_service.update_station_role(\n station_id, station_role_id, request\n )", "def 
set_role(userid, role, group, request=None):", "def patch(self, username, role):\n try:\n UserService.add_role_to_user(token_auth.current_user(), username, role)\n return {\"Success\": \"Role Added\"}, 200\n except UserServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def test_edit_role_change_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Remove controller, Add cinder and ceph-osd roles\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n with RolesPanel() as r:\n r.controller.click()\n r.cinder.click()\n r.ceph_osd.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertNotIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role has been removed')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')\n self.assertIn(ROLE_CEPH, n.nodes[0].roles.text,\n 'Ceph-osd role')", "async def afterHoursSetRole(self, ctx: Context, role: discord.Role):\n await self.config.guild(ctx.guild).get_attr(KEY_ROLE_ID).set(role.id)\n await ctx.send(f\"Set the After Hours role to {role.name}\")", "def update_role(self):\n all_leader = []\n user_records = self.info\n per = Persons()\n for record in user_records:\n if record['leader'] not in all_leader:\n all_leader.append(record['leader'])\n # print len(all_leader)\n # print all_leader\n for leader in all_leader:\n # print leader\n fil = per.get_one({'dn':leader})\n # print fil\n if fil is None:\n print 'this leader %s is not in our db,please check' % leader\n else:\n per.update_one({'dn':leader},{'role':'leader'})", "def testUpdateUserWithBadRole(self):\n info = TUserUpdate(u'user', u's3cr3t', u'new-name', 'new@example.com',\n 'BAD_ROLE')\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n deferred = self.facade.updateUser(session, info)\n yield self.assertFailure(deferred, TBadRequest)", "async def setjoinrole(self, ctx, role):\r\n guild = ctx.message.guild\r\n role = discord.utils.get(guild.roles, name=role)\r\n functions.updatesql(server=ctx.guild.id, joinrole=role.id)\r\n await ctx.send(embed=discord.Embed(title='Sucsess!', color=discord.Colour.from_rgb(255, 0, 255)))", "def add_employeeRole(self, id, role):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employeeRoles values(%s,%s)',\n (id, role))\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save EmployeeRole!\\n(%s)' % (error))", "def update(self, uid):\n raise NotImplementedError", "def put(self, id):\n data = flask.request.json\n user_dao.update_user(id, data)\n return None, 204", "async def _role_repl(self, ctx: Context, *, role: discord.Role):\n\n msg = await ctx.send(\n _(\n \"Are you sure you want to set `{}` as replacement role?\"\n ).format(role.name)\n )\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\n await ctx.bot.wait_for(\"reaction_add\", check=pred)\n\n if pred.result:\n await self.config.guild(ctx.guild).repl_id.set(role.id)\n await ctx.send(\n _(\"Set `{}` as replacement role!\").format(role.name)\n )\n else:\n await ctx.send(_(\"Aborted replacement role setup.\"))", "async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await 
self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)", "def mark_changed(self, sid, uid=None, rid=None):\n if uid:\n current_db_change_history.updated_users[sid].append(uid)\n elif rid:\n current_db_change_history.updated_roles[sid].append(uid)", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def __restoreRole(self, session):\r\n if self.__role:\r\n _logger.info(\"Switching user to role: %s\" % self.__role)\r\n\r\n session.role = self.__role\r\n self.__role = None\r\n _logger.info(\"Switched user to role: %s\" % session.role)", "async def setRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.edit(roles=roles)\n await ctx.send(f\"Setting {roles_str(person, roles)}\")", "def update_score(score, role):\n if role == 'winner':\n score = score + 1\n if role == 'loser':\n score = score - 1\n return score", "def updateMentors(request):\n\n return updateRole('gsoc_mentor')", "def update_user(id):\n pass", "async def _toggle_role(self, ctx, selfrole: Selfrole):\n\n if selfrole.role not in ctx.message.author.roles:\n try:\n await ctx.message.author.add_roles(selfrole.role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.ADD_ROLE, selfrole.role.name\n )\n\n await ctx.send(f\"{config.YES} {selfrole.join_message}\")\n\n elif selfrole.role in ctx.message.author.roles:\n try:\n await ctx.message.author.remove_roles(selfrole.role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.REMOVE_ROLE, selfrole.role.name\n )\n\n await ctx.send(\n f\"{config.YES} The `{selfrole.role.name}` role was removed from you.\"\n )", "def setRole(self, *args):\n return _libsbml.ReferenceGlyph_setRole(self, *args)", "def setRole(self, *args):\n return _libsbml.SpeciesReferenceGlyph_setRole(self, *args)", "def test_put_role(self):\n self.assertEqual(RoleAssignment.objects.count(), 3)\n\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 3)\n\n self.update_as.refresh_from_db()\n model_dict = model_to_dict(self.update_as)\n expected = {\n 'id': self.update_as.pk,\n 'project': self.project.pk,\n 'role': self.role_guest.pk,\n 'user': self.assign_user.pk,\n 'sodar_uuid': self.update_as.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n\n expected = {\n 'project': str(self.project.sodar_uuid),\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(self.assign_user.sodar_uuid),\n 'sodar_uuid': str(self.update_as.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_patch_namespaced_role(self):\n pass", "def role_write(self, fail_on_found=False, disassociate=False, **kwargs):\n\n # Get the role, using only the resource data\n data, self.endpoint = self.data_endpoint(kwargs, ignore=['obj'])\n debug.log('Checking if role exists.', header='details')\n response = self.read(pk=None, fail_on_no_results=True,\n fail_on_multiple_results=True, **data)\n role_data = response['results'][0]\n role_id = role_data['id']\n\n # Role exists, change display settings to output 
something\n self.configure_display(role_data, kwargs, write=True)\n\n # Check if user/team has this role\n # Implictly, force_on_exists is false for roles\n obj, obj_type, res, res_type = self.obj_res(kwargs)\n debug.log('Checking if %s already has role.' % obj_type,\n header='details')\n data, self.endpoint = self.data_endpoint(kwargs)\n response = self.read(pk=None, fail_on_no_results=False,\n fail_on_multiple_results=False, **data)\n\n msg = ''\n if response['count'] > 0 and not disassociate:\n msg = 'This %s is already a member of the role.' % obj_type\n elif response['count'] == 0 and disassociate:\n msg = 'This %s is already a non-member of the role.' % obj_type\n\n if msg:\n role_data['changed'] = False\n if fail_on_found:\n raise exc.NotFound(msg)\n else:\n debug.log(msg, header='DECISION')\n return role_data\n\n # Add or remove the user/team to the role\n debug.log('Attempting to %s the %s in this role.' % (\n 'remove' if disassociate else 'add', obj_type), header='details')\n post_data = {'id': role_id}\n if disassociate:\n post_data['disassociate'] = True\n client.post('%s/%s/roles/' % (self.pluralize(obj_type), obj),\n data=post_data)\n role_data['changed'] = True\n return role_data", "async def on_guild_role_update(before, after):\r\n if before.name != after.name:\r\n logging.info(\"role %s on %d updated to %s\", before.name, before.guild.id, after.name)\r\n Rules = Query()\r\n # Check targets\r\n db.update({'target': after.name}, (Rules.guild == before.guild.id) & (Rules.target == before.name))\r\n # Check requireds\r\n res = db.search((Rules.guild == after.guild.id) & (Rules.roles.any([before.name])))\r\n for r in res:\r\n row = list(r[\"roles\"])\r\n row.remove(before.name)\r\n row.append(after.name)\r\n db.update({'roles': row}, doc_ids=[r.doc_id])", "def put(self, id):\n return userDao.update(id, api.payload)", "def roles(self, role_id, data, tenant_id=None, api_version=\"v2.1\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/roles/{}\".format(api_version,\n tenant_id,\n role_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def update(self, resource, id, **data):\n self.request('/' + resource + '/' + str(id), 'PUT', body=urllib.urlencode(data))\n return True", "def update_user():", "def test_edit_role_add_new_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Add cinder role\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n RolesPanel().cinder.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')", "def update_user(uid, pwd, name, role):\r\n session = tables.get_session()\r\n if session is None:\r\n return {'success': False, 'reason': 'failed'}\r\n response = {}\r\n try:\r\n user_account = UserAccount()\r\n user_account.update_user(uid, pwd, name, role, session)\r\n session.commit()\r\n except SQLAlchemyError as err:\r\n LOGGER.error('update user account failed: %s', err)\r\n return 
response\r\n finally:\r\n session.close()\r\n return response", "def set_role(username, role_name=\"\"):\n\tsession = get_session()\n\tdata = {\"username\": username, \"role\": role_name}\n\tsession.post(\"{url}/api/users/set_role\".format(url=get_registry_url()), json=data)", "def test_ipam_roles_partial_update(self):\n pass", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "def on_update(self):\n\t\tusers = frappe.get_all('User', filters={'role_profile_name': self.name})\n\t\troles = [role.role for role in self.roles]\n\t\tfor d in users:\n\t\t\tuser = frappe.get_doc('User', d)\n\t\t\tuser.set('roles', [])\n\t\t\tuser.add_roles(*roles)", "def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)", "def update_workspaces_members_role(\n self,\n context,\n request: TracimRequest,\n hapic_data=None\n ) -> UserRoleWorkspaceInContext:\n app_config = request.registry.settings['CFG']\n rapi = RoleApi(\n current_user=request.current_user,\n session=request.dbsession,\n config=app_config,\n )\n\n role = rapi.get_one(\n user_id=hapic_data.path.user_id,\n workspace_id=hapic_data.path.workspace_id,\n )\n workspace_role = WorkspaceRoles.get_role_from_slug(hapic_data.body.role)\n role = rapi.update_role(\n role,\n role_level=workspace_role.level\n )\n return rapi.get_user_role_workspace_with_context(role)", "def _restoreRole(self, oldRole, args):\n if oldRole:\n args['role'] = oldRole\n else:\n del args['role']", "def update_forum_role_membership(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_by_id(course_id)\r\n has_instructor_access = has_access(request.user, 'instructor', course)\r\n has_forum_admin = has_forum_access(\r\n request.user, course_id, FORUM_ROLE_ADMINISTRATOR\r\n )\r\n\r\n unique_student_identifier = request.GET.get('unique_student_identifier')\r\n rolename = request.GET.get('rolename')\r\n action = request.GET.get('action')\r\n\r\n # default roles require either (staff & forum admin) or (instructor)\r\n if not (has_forum_admin or has_instructor_access):\r\n return HttpResponseBadRequest(\r\n \"Operation requires staff & forum admin or instructor access\"\r\n )\r\n\r\n # EXCEPT FORUM_ROLE_ADMINISTRATOR requires (instructor)\r\n if rolename == FORUM_ROLE_ADMINISTRATOR and not has_instructor_access:\r\n return 
HttpResponseBadRequest(\"Operation requires instructor access.\")\r\n\r\n if not rolename in [FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA]:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"Unrecognized rolename '{}'.\".format(rolename)\r\n ))\r\n\r\n user = get_student_from_identifier(unique_student_identifier)\r\n target_is_instructor = has_access(user, 'instructor', course)\r\n # cannot revoke instructor\r\n if target_is_instructor and action == 'revoke' and rolename == FORUM_ROLE_ADMINISTRATOR:\r\n return HttpResponseBadRequest(\"Cannot revoke instructor forum admin privileges.\")\r\n\r\n try:\r\n update_forum_role(course_id, user, rolename, action)\r\n except Role.DoesNotExist:\r\n return HttpResponseBadRequest(\"Role does not exist.\")\r\n\r\n response_payload = {\r\n 'course_id': course_id.to_deprecated_string(),\r\n 'action': action,\r\n }\r\n return JsonResponse(response_payload)", "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def promoteRole(event,context):\n #given an email and a role promote the user to that role\n if 'role' not in event or 'auth_email' not in event or 'auth' not in event or 'user_email' not in event or 'roleValue' not in event:\n ret = {\"statusCode\":400,\"body\":\"missing email , auth or role\"}\n return config.add_cors_headers(ret)\n #check if non emprt string\n if(type(event['roleValue']) != bool):\n ret = {\"statusCode\":400,\"body\":\"Inavalid value for role\"}\n return config.add_cors_headers(ret)\n if len(event['role']) < 1:\n ret = {\"statusCode\":400,\"body\":\"Invalid role\"}\n return config.add_cors_headers(ret)\n updates = {\"$set\":{\"role.\"+event['role']:event['roleValue']}}\n #parse authorization email and user email and make call to update api. 
If coming from vhx-scheduler most likely will be a director\n request_data = {\n \"auth_email\":event[\"auth_email\"],\n \"user_email\":event[\"user_email\"],\n \"auth\":event[\"auth\"],\n \"updates\":updates\n }\n #make request and return the value lcs gives us\n ret = requests.post(config.BASE_URL +'/update', json = (request_data))\n return config.add_cors_headers(ret.json())", "def test_update_role_type_name_level(self):\n response = requests.post(\"http://pulse-rest-testing.herokuapp.com/books\",\n data={\"title\": \"Update Item\", \"author\": \"Inna Korsun\"})\n body = response.json()\n id_book_new = body[\"id\"]\n\n res = requests.get(self.role_url + str(self.id_role))\n level_cur = res.json()[\"level\"]\n role = {\"name\": \"Gandalf\", \"type\": \"Maya\",\"level\":level_cur+10, \"book\":id_book_new}\n response = requests.put(self.role_url+ str(self.id_role), data=role)\n print(response.status_code)\n self.assertEqual(response.status_code, 200)\n\n body = response.json()\n\n self.assertEqual(role[\"name\"], body[\"name\"])\n self.assertEqual(role[\"type\"], body[\"type\"])\n\n res = requests.get(self.role_url + str(body[\"id\"]))#check that item present in role's list\n self.assertEqual(res.status_code, 200)\n self.roles_ids.append(body[\"id\"])\n self.id_book#add id role to list which should be deleted in tearDown", "def put(self, _id):\n payload = self.request.json\n # TODO: validate the json before updating the db\n self.app.db.jobs.update({'_id': int(_id)}, {'$set': {'status': payload.get('status'), 'activity': payload.get('activity')}})", "async def muterole(self, ctx, *, role: discord.Role):\n await queries.update_setting(ctx, \"guild_settings\", \"mute_role_id\", role.id)\n await util.send_success(ctx, f\"Muting someone now gives them the role {role.mention}\")", "def roles(self, roles):\n\n self._roles = roles", "def roles(self, roles):\n\n self._roles = roles", "def roles(self, roles):\n\n self._roles = roles", "def updateSkillForPlayer(self, userid, name, level):\r\n if not isinstance(userid, int):\r\n userid = self.getUserIdFromSteamId(userid)\r\n self.execute(\"UPDATE Skill SET level=? WHERE UserID=? AND name=?\", level, userid, name)", "def edit_user(user_id):\n \"\"\"Cannot update a user's role\"\"\"\n db = get_db()\n users = db.users\n data = request.json\n \n # Check if user_id is a string\n if not isinstance(user_id, str):\n raise APIException(status_code=400, message='user_id not a string')\n \n # Check if user_id is actually an entry in the users collection\n cursor = users.find({\"user_id\": user_id})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='user_id does not exist yet')\n elif cursor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple entries with same user_id found. 
user_id must be unique')\n \n # Validate that the data matches the required format\n # user_id = data['user_id']\n # del data['user_id']\n validate_user_data(data, is_adding_new_user=False)\n # data['user_id'] = user_id\n\n result = users.update_one(\n {\"user_id\": user_id},\n {\n \"$set\": {\n \"name\": data[\"name\"],\n \"phone\": data[\"phone\"],\n \"email\": data[\"email\"],\n \"VenmoUsername\": data[\"VenmoUsername\"],\n \"gender\": data[\"gender\"],\n \"height\": data[\"height\"],\n \"weight\": data[\"weight\"],\n \"age\": data[\"age\"],\n \"bio\": data[\"bio\"],\n \"tags\": data[\"tags\"],\n \"location\": data[\"location\"],\n \"pic_url\": data[\"pic_url\"]\n }\n }\n )\n \n if \"role\" not in data:\n return '', 200\n if data[\"role\"] == \"Mentor\":\n result = users.update_one(\n {\"user_id\": user_id},\n {\n \"$set\": {\n \"rates\": data[\"rates\"],\n \"accepting_clients\": data[\"accepting_clients\"]\n }\n }\n )\n return '', 200", "def update_user(id):\n with app.app_context():\n user = User.query.get(id)\n if user is None:\n return \"User not found\", 404\n skills = validate_skills(request.get_json().get(\"skills\"))\n if not skills:\n return \"Invalid skills\", 400\n\n for skill in skills:\n skill_db = Skill.query.filter_by(name=skill).first()\n if skill_db is None:\n skill_db = Skill(name=skill)\n db.session.add(skill_db)\n \n user.skills = [\n skill for skill in Skill.query.filter(Skill.name.in_(skills)).all()\n ]\n \n users_response = UsersResponse(\n users=[\n {\n \"id\": user.id,\n \"name\": user.name,\n \"skills\": [skill.name for skill in user.skills]\n }\n ]\n )\n db.session.commit()\n return users_response.json(), 200", "def put(uid: int):\n\n if not (director_json := request.get_json()):\n raise NoContentError\n\n if not (director := Director.query.get(uid)):\n raise NotFoundError\n\n try:\n if director.id != director_json[\"id\"]:\n raise BadRequestError\n director.name = director_json[\"name\"]\n db.session.add(director)\n db.session.commit()\n except Exception:\n raise BadRequestError\n\n return f\"updated /directors/{director.id}\", 200", "def setRole(self, room, nick, role):\n if role not in ('moderator', 'participant', 'visitor', 'none'):\n raise TypeError\n query = ET.Element('{http://jabber.org/protocol/muc#admin}query')\n item = ET.Element('item', {'role':role, 'nick':nick}) \n query.append(item)\n iq = self.xmpp.makeIqSet(query)\n iq['to'] = room\n result = iq.send()\n if result is False or result['type'] != 'result':\n raise ValueError\n return True", "async def editrole(self, ctx: context.CustomContext, *, role: Fuzzy[Selfrole]):\n\n new_join_message = await ctx.input(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with the new join message for `{role.role.name}`.\"\n f\"\\n{config.HINT} The current join message is: `{role.join_message}`\"\n )\n\n await self.bot.db.execute(\n \"UPDATE selfrole SET join_message = $1 WHERE role_id = $2\",\n new_join_message,\n role.role.id,\n )\n\n await ctx.send(\n f\"{config.YES} The join message for `{role.role.name}` was updated.\"\n )", "def test_patch_namespaced_role_binding(self):\n pass", "async def edit(self, *, name, roles: Optional[Any] = ..., reason: Optional[Any] = ...):\n ...", "def update(self, username, password, rol, **kwargs):\n\n self.usuario.groups.set([rol])\n if username != self.usuario.username:\n self.usuario.username = username\n \n if password:\n self.usuario.set_password(password)\n \n self.usuario.save()\n\n for field, value in kwargs.items():\n setattr(self, field, value)\n\n self.save()" ]
[ "0.6914042", "0.68597794", "0.6641208", "0.6528812", "0.6419764", "0.6366573", "0.6216717", "0.620263", "0.61314535", "0.60046023", "0.59110934", "0.5883643", "0.58780575", "0.5868561", "0.58423275", "0.57909155", "0.5775993", "0.5757083", "0.5700232", "0.5646141", "0.56256247", "0.56122863", "0.5557507", "0.55324906", "0.55237705", "0.55101806", "0.54970086", "0.5476688", "0.5434514", "0.5408664", "0.54074347", "0.5401419", "0.5401419", "0.5377388", "0.53539455", "0.534478", "0.533564", "0.5335162", "0.53080237", "0.5297001", "0.5289186", "0.5280537", "0.527234", "0.5263069", "0.5246536", "0.5235534", "0.52252984", "0.51499146", "0.5130216", "0.5105932", "0.50730693", "0.5067894", "0.50469023", "0.50381935", "0.5029902", "0.50187945", "0.50038797", "0.4998833", "0.49924195", "0.4992218", "0.49908435", "0.49767554", "0.4956753", "0.49491793", "0.494774", "0.49458215", "0.49444774", "0.49377245", "0.49353302", "0.4931127", "0.49303797", "0.49109426", "0.48835024", "0.487907", "0.48642945", "0.4862015", "0.48592585", "0.48475826", "0.48459688", "0.4827646", "0.48238108", "0.48121363", "0.48087966", "0.4784237", "0.47836444", "0.4781164", "0.47809878", "0.47776654", "0.4758131", "0.4758131", "0.4758131", "0.47488287", "0.47453207", "0.47430718", "0.47355288", "0.47345293", "0.4734445", "0.4725585", "0.47241557", "0.4723355" ]
0.6374615
5
Returns the token and dsn from a key. Generate a simple SHA1 hash of the key. key is a 64-bit integer; Token is a 32-bit integer, dsn is a 64-bit integer
def key2tokenAndDSN(self, key):
    import binascii
    import struct
    import hashlib
    self.keystr = struct.pack("!Q", key)
    self.h = hashlib.sha1(self.keystr.rjust(8, '\00'))
    self.shastr = self.h.digest()  # binary
    #shastr = struct.pack("!IIIII", *struct.unpack("@IIIII",shastr)) #to net
    self.token, self.dsn = self.shastr[0:4], self.shastr[-8:]
    #print "raw: %s (len=%i)"%(shastr,len(shastr))
    #print "hex: %s"% binascii.hexlify(token), "%s"%binascii.hexlify(dsn)
    self.d1, self.d2 = struct.unpack("!II", self.dsn)
    self.token, self.dsn = (struct.unpack("!I", self.token)[0], (long(self.d2) << 32) + self.d1)
    #print "token: %x"% token
    #print "dsn: %x" % dsn
    return (self.token, self.dsn)
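For reference, a minimal Python 3 sketch of the same derivation (the record above is Python 2: note `long` and the print statements). The function name and sample key below are illustrative, not part of the record; the byte layout — big-endian 8-byte key, SHA1, first 4 bytes as token, last 8 bytes word-swapped into dsn — mirrors the code above.

import hashlib
import struct

def key_to_token_and_dsn(key):
    # Illustrative Python 3 mirror of key2tokenAndDSN above:
    # key is a 64-bit integer; token is the first 32 bits of SHA1(key),
    # dsn the last 64 bits with the two 32-bit words swapped,
    # exactly as in the record above.
    keystr = struct.pack("!Q", key)          # 8-byte big-endian key
    shastr = hashlib.sha1(keystr).digest()   # 20-byte SHA1 digest
    token = struct.unpack("!I", shastr[0:4])[0]
    d1, d2 = struct.unpack("!II", shastr[-8:])
    return token, (d2 << 32) + d1

tok, dsn = key_to_token_and_dsn(0x0102030405060708)  # sample key (assumption)
print("token: %x" % tok)
print("dsn: %x" % dsn)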
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def _dsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n content.write_string('ssh-dss')\n content.write_mpint(numbers.public_numbers.parameter_numbers.p)\n content.write_mpint(numbers.public_numbers.parameter_numbers.q)\n content.write_mpint(numbers.public_numbers.parameter_numbers.g)\n content.write_mpint(numbers.public_numbers.y)\n content.write_mpint(numbers.x)\n return content.data", "def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def build_serverkeyhash(self):\n server_publickey = self.getfilehttps(self.epo_url + \"srpubkey.bin\")\n self.serverkeyhash = b64encode(mcafee_crypto.SHA1(server_publickey))\n return self.serverkeyhash", "def getMD5(self, key1, key2, last8):\n n1=[]\n s1=0\n n2=[]\n s2=0\n for c in key1:\n if c.isdigit():\n n1.append(c)\n if c.isspace():\n s1+=1\n \n for c in key2:\n if c.isdigit():\n n2.append(c)\n if c.isspace():\n s2+=1\n \n d1 = int(''.join(n1))\n d2 = int(''.join(n2))\n z1=d1/s1\n z2=d2/s2\n \n print \"Key 1 has %d spaces:\" % s1, z1\n print \"Key 2 has %d spaces:\" % s2, z2\n \n mdThing = struct.pack(\">LL\", z1, z2) + last8\n return md5(mdThing).digest()", "def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def hex_key(uid: Text, mp: Text) -> Text:\n\n key = sha256(mp.encode('utf-8') + admin_pass.encode('utf-8')).hexdigest()\n return sha256(uid.lower().encode('utf-8') + key.encode('utf-8')).hexdigest()[:40]", "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def concat_hash(self, x, symkey):\n msg = '%s%s' % (x, symkey)\n return int(hashlib.sha1(msg).hexdigest(), 16)", "def _hash_djb2(self, key):\n # OPTIONAL STRETCH: Research and implement DJB2\n hash_grotto = 5381\n for k in key:\n hash_grotto = ((hash_grotto << 5) + hash_grotto) + ord(k)\n return hash_grotto & 0xFFFFFFFF", "def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return 
hash.hexdigest()", "def fnv1(self, key, seed=0):\n # def fnv1(self, key):\n\n # Your code here\n \"\"\"\n Returns: The FNV-1 hash (64-bit) of a given string. \n \"\"\"\n #Constants : Fails the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a Hash Function\n # hash = offset_basis + seed\n # # hash = offset_basis\n # for c in key:\n # hash = hash * FNV_prime\n # hash = hash ^ ord(c)\n # return hash\n\n \"\"\"\n Returns: The FNV-1a (alternate) hash of a given string\n \"\"\"\n # #Constants : Passes the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a alternate Hash Function\n # hash = offset_basis + seed\n # for c in key:\n # hash = hash ^ ord(c)\n # hash = hash * FNV_prime\n # return hash", "def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]", "def djb2(self, key):\n # Your code here\n hash = 5381\n for c in key:\n hash = (hash*33)+ ord(c)\n return hash", "def gen_key(self, key):\n b_key = self._hash_digest(key)\n return self._hash_val(b_key, lambda x: x)", "def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]", "def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]", "def djb2(self, key):\n\n hash = 5381\n for n in key.encode():\n # hash = ((hash << 5) + hash) + n\n hash = hash * 33 + n\n\n return hash\n # return hash & 0xFFFFFFFF", "def make_hash(self):\n timestamp = str(int(round(time.time()*1000)))\n auth = b64encode(config.username) + ':' \\\n + b64encode(config.password) + ':' \\\n + b64encode(timestamp)\n rsa = RSA.load_pub_key(config.public_key)\n encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding)\n key = b64encode(encrypted_auth)\n return key", "def GetHashKey(self, key):\r\n data = pickle.dumps(key)\r\n hashObject = hashlib.sha1(data)\r\n hashValue = hashObject.hexdigest()\r\n value = int(hashValue, 16)\r\n return value", "def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()", "def keyhash(key, code):\n\n crypt = CRYPT(key=key, digest_alg=\"sha512\", salt=None)\n return str(crypt(code.upper())[0])", "def keyhash(key, code):\n\n crypt = CRYPT(key=key, digest_alg=\"sha512\", salt=None)\n return str(crypt(code.upper())[0])", "def get_token_hash(self):\n raise NotImplementedError", "def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id", "def test_get_key_digest_with_integer_key(self):\n\n digest = self.as_connection.get_key_digest(\"test\", \"demo\", 1)\n\n assert isinstance(digest, bytearray)", "def sdbm_hash(key):\n\n hash = 0\n\n for i in range(len(key)):\n hash = 
ord(key[i]) + (hash << 6) + (hash << 16) - hash\n\n return (hash & 0x7FFFFFFF)", "def generate_key():\n return get_token_generator().generate_token()", "def generate_tokens(callback_key):\n random_hash = generate_random_security_hash()\n\n return generate_security_hash(random_hash, callback_key), random_hash", "def next_session_key(self, session_key):\r\n\t\t## verify hashcode\r\n\t\tif self.__hash == \"\":\r\n\t\t\traise VDOM_exception_sec(\"Hash code is empty\")\r\n\r\n\t\tfor idx in xrange(len(self.__hash)):\r\n\t\t\ti = self.__hash[idx]\r\n\t\t\tif not str(i).isdigit():\r\n\t\t\t\traise VDOM_exception_sec(\"Hash code contains non-digit letter \\\"%c\\\"\" % str(i))\r\n\t\tresult = 0\r\n\t\tfor idx in xrange(len(self.__hash)):\r\n\t\t\ti = self.__hash[idx]\r\n\t\t\tresult += int(self.__calc_hash(session_key, int(i)))\r\n\t\treturn (\"0\"*10 + str(result)[0:10])[-10:]", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def token(self, key):\n if len(key) < 1:\n raise PatternException(_('Password length must be at least 2'))\n\n n = int(key.hex(), base=16)\n d = len(self.gliphs)\n p = []\n\n p.append(self.gliphs[n % d])\n while len(p) < self.length:\n n = int(n/d) if int(n/d) > 0 else n\n if n < d:\n p.append(self.gliphs[n])\n break\n else:\n p.append(self.gliphs[n % d])\n\n if len(p) != self.length:\n raise PatternException()\n\n # while True:\n # p.append(self.gliphs[n % d])\n # n = int(n/d)\n # if n < d or len(p) == self.length:\n # break\n return ''.join(p)", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def _get_raw_key(self, key_id):", "def processor_hash(value):\r\n shared_secret = settings.CC_PROCESSOR['CyberSource'].get('SHARED_SECRET', '')\r\n hash_obj = hmac.new(shared_secret.encode('utf-8'), value.encode('utf-8'), sha1)\r\n return binascii.b2a_base64(hash_obj.digest())[:-1] # last character is a '\\n', which we don't want\r", "def get_safe_part(key):\r\n version = key[0]\r\n # This function should only be called on versioned keys.\r\n assert version\r\n\r\n # Find the md5 hash part.\r\n c_link_key = key[1]\r\n for key_element in c_link_key[1:]:\r\n if (isinstance(key_element, basestring)\r\n and key_element.startswith('md5:')):\r\n md5 = key_element[4:]\r\n break\r\n\r\n return key[0] + (md5, )", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def get_recovery_code(self, key, user_id):\n user = self.get(user_id, raise_error=True)\n h = hmac.new(key)\n h.update('%s%s%s%s' % (user_id, user.user_name, user.email, user.password))\n return h.hexdigest()", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def _GetServerKey(self, peer_id):\n return hashlib.sha224(peer_id + self.network_id).hexdigest()", "def hash_key(self):", "def key_to_struct(key: RsaKey) -> bytes:\n mod = int_to_bytes(key.n)\n exponent = int_to_bytes(key.e)\n\n return b\"\\x00\\x00\\x00\\x80\" + mod + b\"\\x00\\x00\\x00\\x03\" + exponent", "def create_key ():", "def _hash(self, key):\n # OPTIONAL STRETCH: You may replace the Python hash with DJB2 as a stretch goal\n # return hash(key)\n return self._hash_djb2(key)", "def test_get_key_digest_with_string_key(self):\n\n digest = 
self.as_connection.get_key_digest(\"test\", \"demo\",\n \"get_digest_key\")\n\n assert isinstance(digest, bytearray)", "def from_b58check(key):\n return HDKey.from_bytes(base58.b58decode_check(key))[0]", "def extractParamsFromKey(key: str) -> []:\n l = base64.b64decode(key).decode('ascii')\n \n param1 = l.split('\\n')[0]\n param2 = l.split('\\n')[1]\n #convert back to int\n param1 = int(param1, 16)\n param2 = int(param2, 16)\n \n if args.verbose : print(param1,param2)\n return [param1,param2]", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def _get_signature(self, timestamp: int or str):\n # Key is fixed.\n ha = hmac.new(key=b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)\n grant_type = self.login_data['grant_type']\n client_id = self.login_data['client_id']\n source = self.login_data['source']\n ha.update(bytes((grant_type + client_id + source + str(timestamp)), 'utf-8'))\n return ha.hexdigest()", "def _makeInternalIdentifier(self, prefix, key):\n\n return '_:' + hashlib.sha1(\n ('fb'+prefix+'key'+key).encode('utf-8')).hexdigest()[1:20]", "def _produce_key(self, passphrase):\n from hashlib import sha256\n pp = bytes(passphrase, 'utf-8')\n hash_alg = sha256(pp)\n for i in range(self._get_key_stretches()):\n d = hash_alg.digest()\n hash_alg.update(d + pp)\n return hash_alg.digest()", "def checksum(**kwargs):\n\n # remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def _get_key_pair_from_sk(sk: ecdsa.SigningKey) -> typing.Tuple[bytes, bytes]:\n return sk.to_string(), \\\n sk.verifying_key.to_string(\"compressed\")", "def convert_key_to_string(key):\n\n return key.encode(encoder=nacl.encoding.Base64Encoder).decode('utf-8')", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def djb2(self, key):\n # Your code here\n # https://stackoverflow.com/questions/1579721/why-are-5381-and-33-so-important-in-the-djb2-algorithm \n hash = 5381\n bytes_to_hash = key.encode() \n\n for byte in bytes_to_hash:\n # << is a bitwise operator; in this case, it shifts the \"bits\" of `hash` left by 5 \n hash = ((hash << 5) + byte)\n \n # for x in key: \n # # << is a bitwise operator; in this case, it shifts the \"bits\" of `hash` left by 5 \"bits\"\n # hash = (( hash << 5) + hash) + ord(x) # hash + 33 + ord(x)\n \n return hash", "async def _token(self, user: discord.User = None, user_id: int = None):\n # This is to be used with the registration URL so that it doesn't contain\n # the user's ID in cleartext. 
This is so that person A cannot trivially\n # generate person B's URL and assign them to person A's team.\n if not user:\n user = self.bot.get_user(user_id)\n hashh = await self.config.user(user).digest()\n if hashh is None:\n salt = await self.config.user(user).secret()\n if salt is None:\n salt = random_salt()\n await self.config.user(user).secret.set(salt)\n hashh = digest(user.id, salt)\n await self.config.user(user).digest.set(hashh)\n await self.config.set_raw('undigest', hashh, value=user.id)\n return hashh", "def calc_keyid(flags, protocol, algorithm, st):\n # Remove spaces and create the wire format\n st0=st.replace(' ', '')\n st2=struct.pack('!HBB', int(flags), int(protocol), int(algorithm))\n st2+=base64.b64decode(st0)\n \n # Calculate the tag\n cnt=0\n for idx in xrange(len(st2)):\n s=struct.unpack('B', st2[idx])[0]\n if (idx % 2) == 0:\n cnt+=s<<8\n else:\n cnt+=s\n \n ret=((cnt & 0xFFFF) + (cnt>>16)) & 0xFFFF\n \n return(ret)", "def get_symetric_key():\n\treturn os.urandom(32)", "def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h", "def vault_hash(stark_key, token_id, balance):\n return pedersen_hash(pedersen_hash(stark_key, token_id), balance)", "def _get_keyidv2(pubkey: SupportedKeyTypes) -> int:\n if isinstance(pubkey, RSAPublicKey):\n fmt = serialization.PublicFormat.PKCS1\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER, format=fmt)\n elif isinstance(pubkey, EllipticCurvePublicKey):\n fmt = serialization.PublicFormat.UncompressedPoint\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.X962, format=fmt)\n else:\n raise UnsupportedAlgorithm(f\"Unsupported public key type {type(pubkey)}\")\n\n default_be = backends.default_backend()\n digest = hashes.Hash(hashes.SHA1(), backend=default_be)\n digest.update(pubbytes)\n keydigest = digest.finalize()\n return int.from_bytes(keydigest[16:], \"big\")", "def hash(self, searchkey):\n return searchkey % self.n", "def get_key_id(self):", "def private_key(self):", "def key():", "def _compute_key(secret_key=None):\n if secret_key is None:\n secret_key = settings.SECRET_KEY\n if isinstance(secret_key, six.string_types):\n secret_key = secret_key.encode()\n return SHA256.new(bytearray(secret_key)).digest()", "def sign(self, request, consumer, token):\r\n key, raw = self.signing_base(request, consumer, token)\r\n hashed = hmac.new(key, raw, sha)\r\n # Calculate the digest base 64.\r\n return binascii.b2a_base64(hashed.digest())[:-1]", "def key(key):\n return key", "def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def encode_mac_id(self, request, data):\n # There might be multiple secrets in use, if we're in the\n # process of transitioning from one to another. 
Always use\n # the last one aka the \"most recent\" secret.\n secret = self._get_token_secrets(request)[-1]\n id = tokenlib.make_token(data, secret=secret)\n key = tokenlib.get_token_secret(id, secret=secret)\n return id, key", "def gen_symkey(self, message):\n return int(hashlib.sha1(str(message)).hexdigest(), 16)", "def _fingerprint(key_object, load_private_key):\n\n if isinstance(key_object, PrivateKeyInfo):\n key = key_object['private_key'].parsed\n\n if key_object.algorithm == 'rsa':\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n params = key_object['private_key_algorithm']['parameters']\n public_key = Integer(pow(\n params['g'].native,\n key_object['private_key'].parsed.native,\n params['p'].native\n ))\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n public_key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key['public_key'].native\n if public_key is None:\n # This is gross, but since the EC public key is optional,\n # and we need to load the private key and use the crypto lib\n # to get the public key, we have to import the platform-specific\n # asymmetric implementation. This is the reason a bunch of the\n # imports are module imports, so we don't get an import cycle.\n public_key_object = load_private_key(key_object).public_key\n public_key = public_key_object.asn1['public_key'].parsed.native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n if isinstance(key_object, PublicKeyInfo):\n if key_object.algorithm == 'rsa':\n key = key_object['public_key'].parsed\n\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n key = key_object['public_key'].parsed\n params = key_object['algorithm']['parameters']\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key_object['public_key'].native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n raise ValueError(pretty_message(\n '''\n key_object must be an instance of the\n asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n classes, not %s\n ''',\n type_name(key_object)\n ))", "def public_key(self):", "def _get_pubickey_sha1_hash(cert):\n pkey = cert.get_pubkey()\n pkey_asn1 = dump_publickey(FILETYPE_ASN1, pkey)\n decoded_pkey, _ = der_decoder.decode(\n pkey_asn1, rfc2459.SubjectPublicKeyInfo())\n pubkey = bit_string_to_bytearray(decoded_pkey['subjectPublicKey'])\n # algorithm = decoded_pkey['algorithm'] # RSA encryption\n sha1_hash = hashlib.sha1()\n sha1_hash.update(pubkey)\n return sha1_hash", "def key(self):\n return self._key.decode('utf-8')", "def _generate_connection_hash(dsn, async=False):\n # Create our hashlib object\n sha = hashlib.sha1()\n\n # Update the hashlib sha1 with the value to hash\n sha.update(\"%s:%s\" % (dsn, async))\n\n # Return the hexdigest of the sha1\n return str(sha.hexdigest())", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return 
m.hexdigest()", "def get_key(domain, intent, query_text):\n\n h = sha256(domain.encode())\n h.update(b\"###\")\n h.update(intent.encode())\n h.update(b\"###\")\n h.update(query_text.encode())\n return h.hexdigest()", "def secret_to_key(secret, s2k_specifier):\r\n c = ord(s2k_specifier[8])\r\n EXPBIAS = 6\r\n count = (16+(c&15)) << ((c>>4) + EXPBIAS)\r\n\r\n d = sha1()\r\n tmp = s2k_specifier[:8]+secret\r\n slen = len(tmp)\r\n while count:\r\n if count > slen:\r\n d.update(tmp)\r\n count -= slen\r\n else:\r\n d.update(tmp[:count])\r\n count = 0\r\n return d.digest()", "def sha1(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha1\")", "def make_token():\n return secrets.token_urlsafe(36)", "def __LFSR(self, key: bytearray) -> int:\n x = key.pop()\n out = x ^ key[254] ^ key[244]\n key.append(out)\n return out", "def compute_hash(self, key: int):\n return key % 42", "def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def key_id(key, origin=None):\n\n rdata = _to_rdata(key, origin)\n if key.algorithm == RSAMD5:\n return (rdata[-3] << 8) + rdata[-2]\n else:\n total = 0\n for i in range(len(rdata) // 2):\n total += (rdata[2 * i] << 8) + rdata[2 * i + 1]\n if len(rdata) % 2 != 0:\n total += rdata[len(rdata) - 1] << 8\n total += (total >> 16) & 0xFFFF\n return total & 0xFFFF", "def djb2_hash(key):\n\n hash = 5381\n\n for i in range(len(key)):\n hash = ((hash << 5) + hash) + ord(key[i])\n\n return hash", "def fingerprint_from_var(var):\n vsn = gpg_version()\n cmd = flatten([gnupg_bin(), gnupg_home()])\n if vsn[0] >= 2 and vsn[1] < 1:\n cmd.append(\"--with-fingerprint\")\n\n output = polite_string(stderr_with_input(cmd, var)).split('\\n')\n if not output[0].startswith('pub'):\n raise CryptoritoError('probably an invalid gpg key')\n\n if vsn[0] >= 2 and vsn[1] < 1:\n return output[1] \\\n .split('=')[1] \\\n .replace(' ', '')\n\n return output[1].strip()" ]
[ "0.59551054", "0.59115833", "0.59115833", "0.58881515", "0.5808499", "0.5731819", "0.5719893", "0.57018846", "0.569349", "0.5679318", "0.5663129", "0.5659303", "0.561188", "0.5586423", "0.5538995", "0.55299336", "0.54740316", "0.5471576", "0.5457066", "0.5430525", "0.54092103", "0.540759", "0.54051906", "0.53947127", "0.5394511", "0.53936934", "0.5391949", "0.5386156", "0.53848135", "0.5376851", "0.53761625", "0.53761625", "0.53552866", "0.53307635", "0.5310375", "0.5307412", "0.5306878", "0.5293437", "0.5278009", "0.5272905", "0.5269197", "0.5249941", "0.52456677", "0.523639", "0.5232369", "0.5229795", "0.52148503", "0.51983327", "0.5196958", "0.5187249", "0.5173798", "0.5167313", "0.5160251", "0.51579285", "0.5151187", "0.5149615", "0.51407826", "0.51274484", "0.5118237", "0.5110472", "0.5107586", "0.5107214", "0.5101541", "0.5100695", "0.50978553", "0.50908667", "0.50807154", "0.5080637", "0.5057587", "0.50538105", "0.50502247", "0.5048834", "0.50467783", "0.50353926", "0.5031116", "0.5025402", "0.50214344", "0.5019009", "0.5007746", "0.4995614", "0.4983269", "0.49805188", "0.49757823", "0.49737924", "0.49733573", "0.49689287", "0.49683744", "0.496529", "0.49614963", "0.4960176", "0.49542564", "0.49490908", "0.49480474", "0.49473548", "0.49422604", "0.49398392", "0.49347633", "0.49279645", "0.4922981", "0.4921819" ]
0.8006403
0
Identify distinct MPTCP connections that reached a successful handshake. Look for Ack packets with the MPTCP option header. For each MPTCP connection, report the receiver's token value, which acts as the connectionID
def mptcp_connections(self, pkts):
    count = 0
    #MPTCP_Capable = 0x0
    #MPTCP_CapableACK ---> successful handshake
    print "======================================================================"
    print "Successful Handshake --- Look for Ack packets with MPTCP option Header"
    print """Token = connectionID = SHA1(key)[0-32] of Other party's key. (Capture from either step 2 or 3 in the first handshake)"""
    print "Total packets: %s" % len(pkts)
    print "======================================================================"
    print "Identifying MPTCP Connections...."
    for i in range(len(pkts)):
        if(MPTCP_CapableACK in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 0):
            count += 1  #Count the number of distinct MPTCP connections
            #Compute the receiver's token
            self.key_rcv = pkts[i][TCPOption_MP].mptcp.rcv_key
            self.rcv_token, self.rcv_dsn = self.key2tokenAndDSN(self.key_rcv)
            #Compute the sender's token
            self.key_snd = pkts[i][TCPOption_MP].mptcp.snd_key
            self.snd_token, self.snd_dsn = self.key2tokenAndDSN(self.key_snd)
            print ("%i. New MPTCP Connection (Successful Handshake) src: %s; dest: %s; Sender's key: %s; Receiver's key: %s; Receivers Token (connectionID): %s; Sender's Token: %s" % (count, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_key, pkts[i][TCPOption_MP].mptcp.rcv_key, self.rcv_token, self.snd_token))
    print "Total MPTCP Connections: %i" % count
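A possible driver for the record above, heavily hedged: `TCPOption_MP` and `MPTCP_CapableACK` are not in stock Scapy and presume a Scapy build patched with MPTCP support; `MPTCPAnalyzer` is a hypothetical class exposing the two methods in this record, and the capture filename is illustrative.

from scapy.all import rdpcap  # assumes a Scapy build patched with MPTCP support

analyzer = MPTCPAnalyzer()          # hypothetical class holding the methods above
pkts = rdpcap("mptcp_trace.pcap")   # capture that includes the MPTCP handshakes
analyzer.mptcp_connections(pkts)    # prints one line per distinct connection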
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n if conn_acks[conn_id][co.S2C] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission! (take into account the seq overflow)\n mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_C2S].add(dss)\n conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n conn_acks[conn_id][co.S2C] = dack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta", "def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta", "def process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the first SYN is the client\n # Check if the connection is black listed or not\n conn_id = False\n conn_candidates = fast_conns.get((saddr, daddr, sport, dport), [])\n min_delta = ts_syn_timeout\n for start, duration, cid, fid in conn_candidates:\n if 
(co.START in mptcp_connections[cid].flows[fid].attr\n and abs((ts_delta - mptcp_connections[cid].flows[fid].attr[co.START]).total_seconds()) < min_delta):\n conn_id = cid\n flow_id = fid\n min_delta = abs((ts_delta - mptcp_connections[cid].flows[fid].attr[co.START]).total_seconds())\n\n if not conn_id:\n black_list.add((saddr, sport, daddr, dport))\n return\n elif conn_id and (saddr, sport, daddr, dport) in black_list:\n black_list.remove((saddr, sport, daddr, dport))\n\n if ((saddr, sport, daddr, dport) in acks and (ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]).total_seconds() <= ts_syn_timeout\n and acks[saddr, sport, daddr, dport][co.S2C] == -1) and conn_id in conn_acks:\n # SYN retransmission! But do nothing particular\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta\n else:\n acks[saddr, sport, daddr, dport] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, co.CONN_ID: conn_id,\n co.FLOW_ID: flow_id}\n conn_acks[conn_id] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, SEQ_C2S: set(), SEQ_S2C: set(), HSEQ_C2S: {},\n HSEQ_S2C: {}}", "def compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing MPTCP DSS retransmissions for\", pcap_filepath)\n acks = {}\n conn_acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n pcap_file.close()", "def process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the SYN/ACK is the server\n if (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and acks[daddr, 
dport, saddr, sport][co.C2S] == -1):\n        # Better to check, if not seen, maybe incomplete TCP connection\n        acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n        acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n        conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta\n\n    elif (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n                                                   and tcp.ack == acks[daddr, dport, saddr, sport][co.C2S]):\n        # SYN/ACK retransmission! But don't do anything special\n        acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n        conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta", "def _parse(self):\n        \n        # HUA determine the host ip address\n        # read the first packets and set the most frequent ip as the host\n        ips_dict = {}\n        count = 0\n        for raw_packet in self.raw_packets:\n            if count > 100: break\n            ethernet = Ethernet(raw_packet[0:14])\n            if(ethernet.type != 'IP'):\n                continue\n            ip = Ip(raw_packet[14:])\n            if(ip.protocol != 'TCP') :\n                continue\n            if(ip.src not in ips_dict):\n                ips_dict[ip.src] = 0\n            ips_dict[ip.src] += 1\n            if(ip.dst not in ips_dict):\n                ips_dict[ip.dst] = 0\n            ips_dict[ip.dst] += 1\n        # get the most frequent one\n        max_appear = 0\n        ip = None\n        for key, value in ips_dict.items():\n            if value > max_appear:\n                ip = key\n                max_appear = value\n\n        global _device_ip\n        if not self.enableFilter or not _device_ip:\n            _device_ip = ip\n\n        global _tcp_buf\n        _tcp_buf = {}\n        number = 0\n        self.begin_ts = self.packet_headers[-1]['ts']\n        rcount = 0\n        for raw_packet in self.raw_packets:\n            pcap_packet = Pcap_packet()\n            pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n            pcap_packet.top_layer = 1\n            pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n            \n            #skip the packet that is not an ip packet\n            \n            rcount += 1\n\n            if (pcap_packet.ethernet.type != 'IP'):\n                continue\n\n            pcap_packet.top_layer = 2\n            pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n            # just collect the packets between \n            \n            if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n                and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n                #print \"Ignore ip not ok\"\n                continue\n            '''\n            if rcount < 10 or rcount > 2600:\n                print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n            '''\n            \n            self.pcap_packets.append(pcap_packet)\n            \n\n            #skip the packet that is not a tcp message\n            if (pcap_packet.ip.protocol != 'TCP'):\n                continue\n            \n\n\n            pcap_packet.top_layer = 3\n            pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n            if pcap_packet.ip.src == _device_ip:\n                pcap_packet.tcp.direction = \"out\"\n            else:\n                pcap_packet.tcp.direction = \"in\"\n\n\n            #dispatch the tcp into tcp streams\n            self._add_pkt_into_tcp_stream(pcap_packet, number)\n            \n            #reassemble tcp packet\n            self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n            number += 1\n        #end of for\n        #flush the tcp_buf, otherwise it will lose some http response\n        for sockets in _tcp_buf.keys():\n            self._tcp_flush(sockets)\n            del _tcp_buf[sockets]", "def payload_data(self, pkts):\n\n\t\t#Get all the payload bytes exchanged over MPTCP connections\n\t\tpayload_bytes = 0\n\t\tprint \"Determining the number of payload bytes excluding headers....\"\n\t\t#DSS = 0x2\n\t\tfor i in range(len(pkts)):\n\t\t\tif(TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in 
pkts[i]):\n\t\t\t\tpayload_bytes += len(pkts[i][Raw].load)\n\t\t\t\t#print(\"DSN: %s; subflow_seqnum: %s; Data(bytes): %s\" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))\n\n\t\tprint \"Total Number of payload bytes in the file (entire MPTCP connections) excluding headers): %s\" % (payload_bytes)\n\t\t#MPTCP WITH SUBFLOW CONNECTIONS\n\t\t#MPTCP_JOINs = 0x1\n\t\tprint \"============================================================\"\n\t\tprint \"SUBFLOW Connections with their respective MPTCP connection (identified by connectionID)\"\n\t\tfor i in range(len(pkts)):\n\n\t\t\t#Initial Join Message\n\t\t\t#rcv_token Identifies the connection to which the subflow belongs: connectionID\n\t\t\tif(MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):\n\t\t\t\tprint(\"New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s\" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))\n\n\t\t#TODO: Now Need to track per-connection and per-subflow state", "def copy_info_to_mptcp_connections(connections, mptcp_connections, failed_conns, acksize_all, acksize_all_mptcp, flow_name, fast_conns=None):\n connection = connections[flow_name]\n conn_id, flow_id = get_flow_name_connection_optimized(connection, mptcp_connections, fast_conns=fast_conns)\n if isinstance(conn_id, (int, long)):\n mptcp_connections[conn_id].flows[flow_id].subflow_id = flow_name\n mptcp_connections[conn_id].flows[flow_id].attr[co.TCP_COMPLETE] = connection.flow.attr[co.TCP_COMPLETE]\n mptcp_connections[conn_id].flows[flow_id].attr[co.START] = connection.flow.attr[co.START]\n mptcp_connections[conn_id].flows[flow_id].attr[co.DURATION] = connection.flow.attr[co.DURATION]\n if co.BACKUP in connection.attr:\n mptcp_connections[conn_id].flows[flow_id].attr[co.BACKUP] = connection.attr[co.BACKUP]\n if co.SOCKS_PORT in connection.attr:\n mptcp_connections[conn_id].flows[flow_id].attr[co.SOCKS_PORT] = connection.attr[co.SOCKS_PORT]\n mptcp_connections[conn_id].flows[flow_id].attr[co.SOCKS_DADDR] = connection.attr[co.SOCKS_DADDR]\n if co.SOCKS_PORT not in mptcp_connections[conn_id].attr:\n mptcp_connections[conn_id].attr[co.SOCKS_PORT] = connection.attr[co.SOCKS_PORT]\n mptcp_connections[conn_id].attr[co.SOCKS_DADDR] = connection.attr[co.SOCKS_DADDR]\n\n elif not mptcp_connections[conn_id].attr[co.SOCKS_PORT] == connection.attr[co.SOCKS_PORT] or not mptcp_connections[conn_id].attr[co.SOCKS_DADDR] == connection.attr[co.SOCKS_DADDR]:\n print(\"DIFFERENT SOCKS PORT...\", mptcp_connections[conn_id].attr[co.SOCKS_PORT], connection.attr[co.SOCKS_PORT], mptcp_connections[conn_id].attr[co.SOCKS_DADDR], connection.attr[co.SOCKS_DADDR], conn_id, flow_id)\n\n for direction in co.DIRECTIONS:\n for attr in connection.flow.attr[direction]:\n mptcp_connections[conn_id].flows[flow_id].attr[direction][attr] = connection.flow.attr[direction][attr]\n\n if flow_name in acksize_all[direction]:\n if conn_id not in acksize_all_mptcp[direction]:\n acksize_all_mptcp[direction][conn_id] = {}\n\n acksize_all_mptcp[direction][conn_id][flow_id] = acksize_all[direction][flow_name]\n\n else:\n # This is a TCPConnection that failed to be a MPTCP subflow: add it in failed_conns\n failed_conns[connection.conn_id] = connection\n\n return conn_id, flow_id", "def transmitPollAck(): \n global data\n DW1000.newTransmit()\n data[0] = C.POLL_ACK\n data[17] = anchorID #data[17] is tag Id data[18] is anchor Id\n data[18] = tagID #data[17] is tag Id 
data[18] is anchor Id\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\n DW1000.setData(data, LEN_DATA)\n DW1000.startTransmit()", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! (take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing TCP ack sizes for\", pcap_filepath)\n nb_acks = {co.C2S: {}, co.S2C: {}}\n acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n try:\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n # Check if linux cooked capture\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n eth = dpkt.sll.SLL(buf)\n else:\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & 
dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, saddr, ip, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n finally:\n pcap_file.close()\n\n return nb_acks", "def process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[daddr, dport, saddr, sport][co.C2S] >= 0:\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[daddr, dport, saddr, sport][co.C2S]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.C2S][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if size_payload > 0 and tcp.seq in acks[daddr, dport, saddr, sport][SEQ_S2C]:\n # This is a retransmission!\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][0],\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1],\n ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER]))\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[daddr, dport, saddr, sport][SEQ_S2C].add(tcp.seq)\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[daddr, dport, saddr, sport][SEQ][co.S2C]) >= 3000000:\n# for x in range(50000):\n# acks[daddr, dport, saddr, sport][SEQ][co.S2C].popleft()\n\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta", "def RETRANSMIT(self):\n\n ##############################################\n # retransmit all the unacknowledged packets #\n # (all the packets currently in self.buffer) #\n ##############################################\n \n if(self.timeout_hanjing):\n #If we are coming from the timeout state, retransmit all the buffer\n for k,v in self.buffer.items():\n if(self.SACK == 0):\n header_GBN = GBN(type = 'data', len = len(v), hlen = 6, num = k, win = self.win)\n else:\n 
header_GBN = GBN(type = 'data', options = 1, len = len(v), hlen = 6, num = k, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / v)\n log.debug(\"Sending packet number: %s\", k)\n \n if ((self.Q_3_2 == 1) and (self.dup_ack_hanjing == True) and (self.timeout_hanjing == False)):\n #just retransmit the packet that has been ack'ed 3 times consequtively\n header_GBN = GBN(type = 'data', len = len(self.buffer[self.unack]), hlen = 6, num = self.unack, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / self.buffer[self.unack])\n log.debug(\"Sending packet number: %s\", self.unack)\n \n #Question 3.3\n if(self.SACK == 1 and (self.timeout_hanjing == False) and (self.hlen > 6)):\n if(self.hlen == 9):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) \n if(self.hlen == 12):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) + list(range(self.ledge2, self.ledge2 + self.len2)) \n if(self.hlen == 15):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) + list(range(self.ledge2, self.ledge2 + self.len2)) + list(range(self.ledge3, self.ledge3 + self.len3)) \n \n for i in optionalHeader_list:\n optionalHeader_list[optionalHeader_list.index(i)] = i % 2**self.n_bits\n \n #We need to find the difference between the sender buffer, and the optionalHeader_list\n Sender_buffer_keys = list(self.buffer.keys()) \n log.debug(\"The sender buffer: %s\", Sender_buffer_keys)\n #Trimmed_sender_buffer includes the buffer list only up to the last packet number in the optional header list)\n trimmed_sender_buffer = Sender_buffer_keys[:Sender_buffer_keys.index(optionalHeader_list[-1])+1]\n #Retrans_list is the list of keys to be retransmitted\n log.debug(\"Trimmed Sender Buffer: %s\", trimmed_sender_buffer)\n log.debug(\"Optional Header List: %s\", optionalHeader_list)\n Retrans_list = [item for item in trimmed_sender_buffer if item not in optionalHeader_list]\n log.debug(\"SACK: packets should be retransmitted: %s\", Retrans_list)\n for i in Retrans_list:\n header_GBN = GBN(type = 'data', options = 1 , len = len(self.buffer[i]), hlen = 6, num = i, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / self.buffer[i])\n log.debug(\"SACK Retransmission: Sending packet number: %s\", i)\n # back to SEND state\n self.dup_ack_hanjing = False\n self.timeout_hanjing = False\n raise self.SEND()", "def print_connection_being_established(pkt):\n print_headers(pkt, overwrite_min=0)\n print(green(\"!!!! 
New TCP/OpenFlow Connection being established!!\\n\"))", "def transmitPollAck(): \r\n global data\r\n DW1000.newTransmit()\r\n data[0] = C.POLL_ACK\r\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\r\n DW1000.setData(data, LEN_DATA)\r\n DW1000.startTransmit()", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def Connect(self):\r\n #sleep(1)\r\n #self.src_ref = randint(1, 20)\r\n self.src_ref = 10\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.settimeout(self.timeout)\r\n self.s.connect((self.ip, self.port))\r\n self.s.send(TPKTPacket(COTPConnectionPacket(self.dst_ref,\r\n self.src_ref,\r\n self.dst_tsap,\r\n self.src_tsap,\r\n 0x0a)).pack())\r\n reply = self.s.recv(1024)\r\n _ = COTPConnectionPacket().unpack(TPKTPacket().unpack(reply).data)\r\n\r\n self.NegotiatePDU()", "def handshake(self):\n print(\"No: \"+str(len(self.threads)))\n indexes_to_del = []\n if len(self.threads)>2:\n raise IOError\n for i in range(0,len(self.threads)):\n if not self.threads[i].is_alive():\n indexes_to_del.append(i)\n \n for i in indexes_to_del:#do this otherwise if deleted above, out of index error occurs\n del self.threads[i]\n \n while True:\n data = self.s.recv(1024)\n if data ==\"O\":\n print(\"Hanshake Received\")\n return", "def print_connection_terminated(pkt):\n print_headers(pkt, overwrite_min=0)\n print(red(\"!!!! 
Attention: TCP/OpenFlow Connection Terminated!!\\n\"))", "def describe_connections_on_interconnect(interconnectId=None):\n pass", "def get_dss_and_data_ack(tcp):\n dss, dack, dss_is_8_bytes = False, False, False\n opt_list = dpkt.tcp.parse_opts(tcp.opts)\n for option_num, option_content in opt_list:\n # Only interested in MPTCP with subtype 2\n if option_num == 30 and len(option_content):\n if ord(option_content[0]) == 32:\n flags = ord(option_content[1])\n dss_is_8_bytes = (flags & 0x08) != 0\n dss_is_present = (flags & 0x04) != 0\n dack_is_8_bytes = (flags & 0x02) != 0\n dack_is_present = (flags & 0x01) != 0\n if dack_is_present and not dss_is_present:\n range_max = 8 if dack_is_8_bytes else 4\n dack = 0\n for i in range(range_max):\n dack = dack * 256 + ord(option_content[2 + i])\n\n elif dss_is_present and dack_is_present:\n range_max_dack = 8 if dack_is_8_bytes else 4\n dack = 0\n for i in range(range_max_dack):\n dack = dack * 256 + ord(option_content[2 + i])\n\n start_dss = 2 + range_max_dack\n range_max_dss = 8 if dss_is_8_bytes else 4\n dss = 0\n for i in range(range_max_dss):\n dss = dss * 256 + ord(option_content[start_dss + i])\n\n elif dss_is_present and not dack_is_present:\n global dss_not_ack_warning\n if not dss_not_ack_warning:\n print(\"Case where dss_is_present and dack is not present (not compliant with Linux implementation): continue\", file=sys.stderr)\n dss_not_ack_warning = True\n\n start_dss = 2\n range_max_dss = 8 if dss_is_8_bytes else 4\n dss = 0\n for i in range(range_max_dss):\n dss = dss * 256 + ord(option_content[start_dss + i])\n\n return dss, dack, dss_is_8_bytes", "def handleSent(): \r\n global sentAck\r\n sentAck = True", "def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse 
+= 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol", "def extract_tstat_data_tcp_complete(filename, connections, conn_id):\n log_file = open(filename)\n data = log_file.readlines()\n for line in data:\n # Case 1: line start with #; skip it\n if not line.startswith(\"#\"):\n # Case 2: extract info from the line\n info = line.split()\n conn_id += 1\n connection = TCPConnection(conn_id)\n connection.flow.attr[co.TCP_COMPLETE] = True\n connection.flow.attr[co.SADDR] = co.long_ipv6_address(info[0])\n connection.flow.attr[co.DADDR] = co.long_ipv6_address(info[14])\n connection.flow.attr[co.SPORT] = info[1]\n connection.flow.attr[co.DPORT] = info[15]\n connection.flow.detect_ipv4()\n connection.flow.indicates_wifi_or_cell()\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.START] = timedelta(seconds=float(info[28])/1000)\n connection.flow.attr[co.DURATION] = float(info[30]) / 1000.0\n connection.flow.attr[co.C2S][co.PACKS] = int(info[2])\n connection.flow.attr[co.S2C][co.PACKS] = int(info[16])\n # Note that this count is about unique data bytes (sent in the payload)\n connection.flow.attr[co.C2S][co.BYTES] = int(info[6])\n connection.flow.attr[co.S2C][co.BYTES] = int(info[20])\n # This is about actual data bytes (sent in the payload, including retransmissions)\n connection.flow.attr[co.C2S][co.BYTES_DATA] = int(info[8])\n connection.flow.attr[co.S2C][co.BYTES_DATA] = int(info[22])\n\n connection.flow.attr[co.C2S][co.PACKS_RETRANS] = int(info[9])\n connection.flow.attr[co.S2C][co.PACKS_RETRANS] = int(info[23])\n connection.flow.attr[co.C2S][co.BYTES_RETRANS] = int(info[10])\n connection.flow.attr[co.S2C][co.BYTES_RETRANS] = int(info[24])\n\n connection.flow.attr[co.C2S][co.PACKS_OOO] = int(info[11])\n connection.flow.attr[co.S2C][co.PACKS_OOO] = int(info[25])\n\n connection.flow.attr[co.C2S][co.NB_SYN] = int(info[12])\n connection.flow.attr[co.S2C][co.NB_SYN] = int(info[26])\n connection.flow.attr[co.C2S][co.NB_FIN] = int(info[13])\n connection.flow.attr[co.S2C][co.NB_FIN] = int(info[27])\n connection.flow.attr[co.C2S][co.NB_RST] = int(info[3])\n connection.flow.attr[co.S2C][co.NB_RST] = int(info[17])\n connection.flow.attr[co.C2S][co.NB_ACK] = int(info[4])\n connection.flow.attr[co.S2C][co.NB_ACK] = int(info[18])\n\n # Except RTT, all time (in ms in tstat) shoud be converted into 
seconds\n connection.flow.attr[co.C2S][co.TIME_FIRST_PAYLD] = float(info[31]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_PAYLD] = float(info[32]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD] = float(info[33]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD] = float(info[34]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_FIRST_ACK] = float(info[35]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_ACK] = float(info[36]) / 1000.0\n\n connection.flow.attr[co.C2S][co.RTT_SAMPLES] = int(info[48])\n connection.flow.attr[co.S2C][co.RTT_SAMPLES] = int(info[55])\n connection.flow.attr[co.C2S][co.RTT_MIN] = float(info[45])\n connection.flow.attr[co.S2C][co.RTT_MIN] = float(info[52])\n connection.flow.attr[co.C2S][co.RTT_MAX] = float(info[46])\n connection.flow.attr[co.S2C][co.RTT_MAX] = float(info[53])\n connection.flow.attr[co.C2S][co.RTT_AVG] = float(info[44])\n connection.flow.attr[co.S2C][co.RTT_AVG] = float(info[51])\n connection.flow.attr[co.C2S][co.RTT_STDEV] = float(info[47])\n connection.flow.attr[co.S2C][co.RTT_STDEV] = float(info[54])\n connection.flow.attr[co.C2S][co.TTL_MIN] = float(info[49])\n connection.flow.attr[co.S2C][co.TTL_MIN] = float(info[56])\n connection.flow.attr[co.C2S][co.TTL_MAX] = float(info[50])\n connection.flow.attr[co.S2C][co.TTL_MAX] = float(info[57])\n\n connection.flow.attr[co.C2S][co.SS_MIN] = int(info[71])\n connection.flow.attr[co.S2C][co.SS_MIN] = int(info[94])\n connection.flow.attr[co.C2S][co.SS_MAX] = int(info[70])\n connection.flow.attr[co.S2C][co.SS_MAX] = int(info[93])\n\n connection.flow.attr[co.C2S][co.CWIN_MIN] = int(info[76])\n connection.flow.attr[co.S2C][co.CWIN_MIN] = int(info[99])\n connection.flow.attr[co.C2S][co.CWIN_MAX] = int(info[75])\n connection.flow.attr[co.S2C][co.CWIN_MAX] = int(info[98])\n\n connection.flow.attr[co.C2S][co.NB_RTX_RTO] = int(info[78])\n connection.flow.attr[co.S2C][co.NB_RTX_RTO] = int(info[101])\n connection.flow.attr[co.C2S][co.NB_RTX_FR] = int(info[79])\n connection.flow.attr[co.S2C][co.NB_RTX_FR] = int(info[102])\n connection.flow.attr[co.C2S][co.NB_REORDERING] = int(info[80])\n connection.flow.attr[co.S2C][co.NB_REORDERING] = int(info[103])\n connection.flow.attr[co.C2S][co.NB_NET_DUP] = int(info[81])\n connection.flow.attr[co.S2C][co.NB_NET_DUP] = int(info[104])\n connection.flow.attr[co.C2S][co.NB_UNKNOWN] = int(info[82])\n connection.flow.attr[co.S2C][co.NB_UNKNOWN] = int(info[105])\n connection.flow.attr[co.C2S][co.NB_FLOW_CONTROL] = int(info[83])\n connection.flow.attr[co.S2C][co.NB_FLOW_CONTROL] = int(info[106])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_RTO] = int(info[84])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_RTO] = int(info[107])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_FR] = int(info[85])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_FR] = int(info[108])\n\n connection.attr[co.C2S][co.BYTES] = {}\n connection.attr[co.S2C][co.BYTES] = {}\n\n connection.flow.attr[co.C2S][co.TIMESTAMP_RETRANS] = []\n connection.flow.attr[co.S2C][co.TIMESTAMP_RETRANS] = []\n\n connection.flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = 
timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n\n connections[conn_id] = connection\n\n log_file.close()\n return connections, conn_id", "def delcomptcprxpackets(self) :\n\t\ttry :\n\t\t\treturn self._delcomptcprxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def handleReceived():\r\n global receivedAck\r\n receivedAck = True", "def handleReceived(): \n global receivedAck\n receivedAck = True", "def snmpqosqos_sch_session_conns(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_session_conns\n\t\texcept Exception as e:\n\t\t\traise e", "def on_ctcp(self, raw_msg, source, msg, **kwargs):", "def describe_connections(connectionId=None):\n pass", "def handleSent(): \n global sentAck\n sentAck = True", "def decomptcprxpackets(self) :\n\t\ttry :\n\t\t\treturn self._decomptcprxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def SynAckAttack(host, cmds):\n\tprint(\"\\n###########################################\")\n\tprint(\"# Starting SYN ACK Attack..\")\n\tprint(\"###########################################\\n\")\n\t# ports=[]\n\ttry:\n\t\tamount = int(cmds[3])\n\texcept IndexError:\n\t\tamount = 1\n\ttry:\n\t\tports = cmds[2]\n\t\tports = [int(p) for p in ports.split('.')]\n\texcept IndexError:\n\t\tports = []\n\n\t# hosts = state.host_and_ports.keys()\n\t# ports = []\n\tif not ports:\n\t\tprint(\"***\\n[e]: No ports were specifed, please enter them like so: 80,81,88,3000\")\n\t\tprint(\"[cmds]: \", cmds)\n\t\tprint()\n\t\treturn\n\ttry:\n\t\t\t# for host in hosts:\n\t#\tprint(f\"# Attacking Target: {host}\")\n\t\tfor hostPort in ports:\n\t\t\tfor x in range(0, amount):\n\t\t\t\t# Build a random packet\n\t\t\t\ts_port = randInt()\n\t\t\t\ts_eq = randInt()\n\t\t\t\tw_indow = randInt()\n\n\t\t\t\tIP_Packet = IP()\n\t\t\t\tIP_Packet.src = randomIP()\n\t\t\t\tIP_Packet.dst = host\n\n\t\t\t\tTCP_Packet = TCP()\n\t\t\t\tTCP_Packet.sport = s_port\n\t\t\t\tTCP_Packet.dport = hostPort\n\t\t\t\tTCP_Packet.flags = \"S\"\n\t\t\t\tTCP_Packet.seq = s_eq\n\t\t\t\tTCP_Packet.window = w_indow\n\n\t\t\t\t# Send the packet\n\t\t\t\tsend(IP_Packet/TCP_Packet)\n\t\tprint()\n\t\tprint('***')\n\t\tprint(\"packets explanation:\")\n\t\tprint(\"sent %s packets of this form: \" % amount)\n\t\tIP_Packet.show()\n\t\tprint(\"ihl: internet header length\")\n\t\tprint(\"tos: type of service\")\n\t\tprint(\"frag: fragement offset\")\n\t\tprint(\"ttl: time to live [s]\")\n\t\tprint(\"proto: Protocol num, 0 = IPv6\")\n\t\tprint(\"chksum: check sum for error checking\")\n\t\tprint(\"***\")\n\t\tprint('TCP SYN packet: ')\n\t\tTCP_Packet.show()\n\t\tprint(\"sport: identifies sending port\")\n\t\tprint(\"dport: identifies receiving port\")\n\t\tprint(\"seq: seqence number. Dual role. If SYN flag is set (1), it's initial seqence number.\")\n\t\tprint(\" if flag is clear (0) this is accumulated seqence number for current session.\")\n\t\tprint(\"ack: ack number. 
If ACK flag set then this value is what sender of ACK expects to get back\")\n\t\tprint(\"dataofs: specifies the size of the TCP header in 32-bit words\")\n\t\tprint(\"flags: there are 9 1-bit flags\")\n\t\tprint(\"window: size of the data window the sender of this segment is willing to receive\")\n\t\tprint(\"chksum: error checking checksum\")\n\t\tprint(\"urgptr: position offset from the sequence number of last urgent data byte.\")\n\t\t# get grasp of all flags set in the Scapy TCP packet\n\t\t# obviously, it's going to just be SYN, set with 'S'\n\t\tflags_vals = {\n\t\t\t'F': 0,\n\t\t\t'S': 0,\n\t\t\t'R': 0,\n\t\t\t'P': 0,\n\t\t\t'A': 0,\n\t\t\t'U': 0,\n\t\t\t'E': 0,\n\t\t\t'C': 0,\n\t\t}\n\t\tflags = {\n\t\t\t'F': 'FIN',\n\t\t\t'S': 'SYN',\n\t\t\t'R': 'RST',\n\t\t\t'P': 'PSH',\n\t\t\t'A': 'ACK',\n\t\t\t'U': 'URG',\n\t\t\t'E': 'ECE',\n\t\t\t'C': 'CWR',\n\t\t\t}\n\t\tfor f in TCP_Packet.sprintf('%TCP.flags%'):\n\t\t\tflags_vals[f] = 1\n\t\tprint('flags set in TCP SYN packet')\n\t\tprint([flags[x] for x in TCP_Packet.sprintf('%TCP.flags%')])\n\t\tprint(flags_vals)\n\texcept Exception as e:\n\t\tprint('in syn ack attack: ')\n\t\tprint('something was wrong with arguments: ', cmds)\n\t\tprint('\\n', e)\n\t\treturn", "def on_pes_packet_complete(self):\n pass", "def receive_one_ping(self, current_socket):\n import select\n from struct import pack, unpack\n\n class HeaderInformation(dict):\n \"\"\" Simple storage for received IP and ICMP header information \"\"\"\n def __init__(self, names, struct_format, data):\n unpacked_data = unpack(struct_format, data)\n dict.__init__(self, dict(zip(names, unpacked_data)))\n\n ICMP_MAX_RECV = 2048 # Max size of incoming buffer\n timeout = self.timeout / 1000.0\n\n while True: # Loop while waiting for packet or timeout\n select_start = self.timer()\n inputready, outputready, exceptready = select.select([current_socket], [], [], timeout)\n select_duration = (self.timer() - select_start)\n if inputready == []: # timeout\n return None, 0, 0, 0, 0\n\n receive_time = self.timer()\n\n packet_data, address = current_socket.recvfrom(ICMP_MAX_RECV)\n\n icmp_header = HeaderInformation(\n names=[\n \"type\", \"code\", \"checksum\",\n \"packet_id\", \"seq_number\"\n ],\n struct_format=\"!BBHHH\",\n data=packet_data[20:28]\n )\n\n if icmp_header[\"packet_id\"] == self.own_id: # Our packet\n ip_header = HeaderInformation(\n names=[\n \"version\", \"type\", \"length\",\n \"id\", \"flags\", \"ttl\", \"protocol\",\n \"checksum\", \"src_ip\", \"dest_ip\"\n ],\n struct_format=\"!BBHHHBBHII\",\n data=packet_data[:20]\n )\n packet_size = len(packet_data) - 28\n ip = socket.inet_ntoa(pack(\"!I\", ip_header[\"src_ip\"]))\n # XXX: Why not ip = address[0] ???\n return receive_time, packet_size, ip, ip_header, icmp_header\n\n timeout = timeout - select_duration\n if timeout <= 0:\n return None, 0, 0, 0, 0", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments received - print warning, send ICMP fragmentation needed\n ##TODO: Not working correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # 
_icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request received - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP received - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP received at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Received ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tuples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, let's find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST received - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo received at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about the sending datapath in the payload of the icmp packet\n ##this information is in dictionary format; we parse it out with the _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from opposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns, ts_syn_timeout, ts_timeout):\n    # The sender of the SYN/ACK is the server\n    if (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n                                                 and acks[daddr, dport, saddr, sport][co.C2S] == -1):\n        # Better to check, if not seen, maybe incomplete TCP connection\n        acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n        acks[daddr, dport, saddr, sport][SEQ_S2C].add(tcp.seq)\n        acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq] = [ts_delta, ts_delta]\n        acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n\n    elif (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n                                                   and tcp.seq in acks[daddr, dport, saddr, sport][SEQ_S2C]):\n        # SYN/ACK retransmission!\n        conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n        connections[conn_id].flow.attr[co.S2C][co.TIMESTAMP_RETRANS].append((ts_delta,\n                                                                             ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][0],\n                                                                             ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1],\n                                                                             ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]))\n        acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1] = ts_delta\n        acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta", "def _identify_connection(self):\n        pass #nothing to identify...\n        #raise NotImplementedError(\"Implement!\")", "def _check_connectionlist(self):\n        self.symbol = self.scanner.get_symbol()\n        # Repeatedly call _check_connectionline() until END CONNECTIONS\n        while (\n            not self._is_end(\n                self.symbol)) and (\n                not self._is_eof(\n                    self.symbol)):\n            self._check_connectionline()\n        if self._is_eof(self.symbol):\n            # In case file ends prematurely\n            pass\n        return None", "def filter(self):\n        # outfile = open(self.newpcap, 'wb')\n        # writer = dpkt.pcap.Writer(outfile)\n        f = open(self.pcapfile, 'rb')\n        packets = dpkt.pcap.Reader(f)\n\n        for timestamp, buf in packets:\n            eth = dpkt.ethernet.Ethernet(buf)\n            if not isinstance(eth.data, dpkt.ip.IP):  # make sure the Ethernet frame carries an IP packet; Non IP Packet type not supported\n                continue  # filter out empty IP packets\n            ip = eth.data  # get the Ethernet frame payload (the IP packet)\n            if not isinstance(ip.data, dpkt.tcp.TCP):  # check for TCP at the transport layer\n                continue\n            tcp = ip.data  # get the TCP data\n            # print('-->TCP Data: ', repr(tcp))\n\n            \"\"\" Filter the first packet after the three-way handshake\"\"\"\n            seq = self.seq_pattern.findall(repr(tcp))\n            ack = self.ack_pattern.findall(repr(tcp))\n            if not (seq or ack):  # at least one of seq and ack must be present\n                continue\n            if ack:\n                ack = ack[0]\n            if seq:\n                seq = seq[0]\n\n            if not ack and seq:  # first handshake (SYN) request\n                self.hash_table[seq] = {}\n                self.stream_table[seq] = [buf]\n            if ack and seq:  # second/third handshake or data exchange packet\n                if str(int(ack) - 1) in self.hash_table.keys():  # a first-handshake record exists\n                    number = str(int(ack) - 1)\n                    if 'second' not in self.hash_table[number].keys():  # record the second handshake\n                        self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n                        self.stream_table[number].append(buf)  # append the second handshake to buf\n                        self.resp_relation[seq] = ack  # add an entry to the relation table\n\n                    # a second-handshake record exists; if the hash table also holds a third handshake, save the stream\n                    # this is basically a traffic response packet\n                    elif 'three' in self.hash_table[number].keys():\n                        if number not in self.stream_table.keys():\n                            self.stream_table[number] = []\n                            self.stream_table[number].append(buf)\n                        else:\n                            self.stream_table[number].append(buf)\n\n                # no hash entry for ack-1; may be the third handshake or a traffic request packet\n                elif str(int(seq) - 1) in 
self.hash_table.keys():\r\n                    number = str(int(seq) - 1)\r\n                    if 'second' not in self.hash_table[number]:\r\n                        pass\r\n                    elif 'three' not in self.hash_table[number]:  # third handshake packet\r\n                        self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\r\n                        self.stream_table[number].append(buf)\r\n                    # otherwise it is a traffic packet\r\n                    else:\r\n                        if number not in self.stream_table.keys():\r\n                            self.stream_table[number] = []\r\n                            self.stream_table[number].append(buf)\r\n                        else:\r\n                            self.stream_table[number].append(buf)\r\n                # traffic response packet\r\n                elif str(int(seq) - 1) in self.resp_relation.keys():\r\n                    number = str(int(seq) - 1)\r\n                    second_ack = self.resp_relation[number]\r\n                    number = str(int(second_ack) - 1)\r\n                    if number not in self.stream_table.keys():\r\n                        self.stream_table[number] = []\r\n                        self.stream_table[number].append(buf)\r\n                    else:\r\n                        self.stream_table[number].append(buf)\r\n                else:\r\n                    continue  # seq does not exist\r\n\r\n        # outfile.close()\r\n        f.close()", "def myTransmit(self, connection, apdu):\n        # trace request :\n        #print 'sending : \\t', toHexString(apdu)\n        response, sw1, sw2 = connection.transmit( apdu )\n        # trace response :\n        #if None == response: response=[]\n        #print 'response : \\t', toHexString(response), '\\nstatus words : \\t', \"%x %x\" % (sw1, sw2)\n        if sw1 in range(0x61, 0x6f):\n            print \"Error: sw1: %x sw2: %x\" % (sw1, sw2)\n        return response, sw1, sw2", "def handle_ctrl_packet(conn, pkt):\n    global sessions\n    ip, port = conn.getpeername()\n    logging.info(\"Server receives msg <%s> from %s \" %\n                 (iMSG[pkt.header.mtype], ip))\n    session = find_session(sessions, pkt.body)\n    if pkt.header.mtype == MSG['CHK_FILE']:\n        send_file_meta(conn, pkt.body.filename)\n\n    elif pkt.header.mtype == MSG['REQ_SEG']:\n        if session is None:\n            session = create_new_session(sessions, pkt.body)\n        ret = call_session_process(session, (pkt, ip))\n        if ret is 0:\n            pkt = CCPacket(CCHeader(MSG['SESSIONMETA']), session.meta)\n            try:\n                conn.send(pkt.packed())   # Send session's data\n                # Add to client list of the session\n                logging.info(\"Add %s into client list of segment %d\"\n                             % (ip, session.meta.segmentid))\n                session.add_client(HostInfo(ip, session.meta.sessionid))\n                session.add_client_coop(HostInfo(ip, session.meta.sessionid))\n            except Exception as detail:\n                logging.warning(\"Caught exception receiving from %s for segment %d: %s.\"\n                                % (ip, session.meta.segmentid, detail))\n        else:\n            logging.info(\"call_session_process returned -1\")\n        \n    elif pkt.header.mtype == MSG['HEARTBEAT']:\n        if session is None:\n            # No such session, error notice\n            pass\n        else:\n            ret = call_session_process(session, (pkt, ip))\n            if ret is 0:\n                conn.send(pkt.packed())     # Echo back the heartbeat\n            else:\n                # Session no reply, error notice\n                pass\n            session.client_heartbeat(ip)\n        \n    elif pkt.header.mtype == MSG['REQ_STOP']:\n        # remove client from session\n        if session is None:\n            # No such session, error notice to client\n            pass\n        else:\n            ret = call_session_process(session, (pkt, ip))\n            if ret is 0:\n                session.remove_client(ip)\n                conn.send(CCPacket(CCHeader(MSG['REQ_STOP_ACK']), session.meta).packed())\n            else:\n                # Child no reply, error notice to client\n                pass\n        \n    elif pkt.header.mtype == MSG['CHK_PEERS']:\n        if session is None:\n            pass\n        else:\n            send_peers_info(conn, session, ip)\n        # conn.close()\n        \n    elif pkt.header.mtype == MSG['EXIT']:\n        if session is None:\n            return\n        ret = call_session_process(session, (pkt, ip))\n        if ret is 0:\n            session.remove_client(ip)\n            # Remove the client from cooplist of all sessions\n            for v in sessions.values():\n                v[0].remove_client_coop(ip)\n            conn.send(CCPacket(CCHeader(MSG['EXIT_ACK'])).packed())\n        else:\n            pass\n    conn.close()", 
"def verify_mpls_forwarding_table_gid_counter(device, \n prefix_type,\n bytes_labeled_switched,\n mdt_data_cnt=1, \n max_time=60,\n check_interval=10,\n expected_prefix_exempted=2):\n \n timeout = Timeout(max_time, check_interval)\n while timeout.iterate(): \n try:\n parsed_output1 = device.parse(\"show mpls forwarding-table | sect gid\")\n time.sleep(20)\n parsed_output2 = device.parse(\"show mpls forwarding-table | sect gid\")\n except SchemaEmptyParserError:\n raise SchemaEmptyParserError(\n \"Failed to parse commands\"\n )\n \n cnt=0\n prefix_exempted=0\n # Verify counters are incrementing or not for mentioned prefix\n for labels in parsed_output1.q.get_values(\"local_label\"):\n first_counter = parsed_output1.q.contains(\n labels).get_values(\"bytes_label_switched\")[0]\n second_counter = parsed_output2.q.contains(\n labels).get_values(\"bytes_label_switched\")[0]\n prefix = parsed_output1.q.contains(\n labels).get_values('prefix_or_tunnel_id')[0]\n if (int(second_counter) - int(first_counter)) > int(bytes_labeled_switched):\n cnt += 1\n else:\n if prefix_exempted <= expected_prefix_exempted:\n prefix_exempted += 1\n else:\n return False\n \n \n # verfiy mdt_data_cnt if set\n if cnt == mdt_data_cnt:\n return True\n timeout.sleep() \n return False", "def fw_handshake_2_4_tkip( self , vendor , eapolMIC = True , eapolMICFlag = True , customFlaglist = None , customRC = None ):\n\t\tparameterList = 'vendor=' + str(vendor) + ',eapolMIC=' + str(eapolMIC) + ',eapolMICFlag=' + str(eapolMICFlag) + ',customFlaglist=' + str(customFlaglist) + ',customRC=' + str(customRC)\n\t\tself.logger.log( self.logger.TRANSMIT , 'EAPOL 4-Way Handshake Message 2/4 TKIP (' + parameterList + ')')\n\t\ttry:\n\t\t\n\t\t\t# Create an empty EAPOL WPA Key packet.\n\t\t\tpacket \t\t= EAPOL( version=1 , type='EAPOL-Key' )/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tpacketKey \t= packet.getlayer( EAPOL_WPAKey )\n\t\t\tif vendor != 'NONE':\n\t\t\t\tvendorInfo = Dot11Elt( ID='vendor' , info=getVendorInfo( type=vendor ) )\n\t\t\tflaglist = ['HMAC_MD5_RC4','idx0','pairwise']\n\t\t\tif eapolMICFlag is True:\n\t\t\t\tflaglist.append('mic')\n\t\t\t\n\t\t\t# Fill in the fields.\n\t\t\tif customFlaglist is not None:\n\t\t\t\tflaglist = customFlaglist\n\t\t\tpacketKey.KeyInfo = self.__getKeyInformation( flaglist )\n\t\t\tif customRC is not None:\n\t\t\t\tif customRC == 'lower':\t\n\t\t\t\t\tself.replayCounter -= 1\n\t\t\t\telif customRC == 'higher':\n\t\t\t\t\tself.replayCounter += 1\n\t\t\tpacketKey.ReplayCounter = self.replayCounter\n\t\t\tpacketKey.Nonce = self.SNonce\n\t\t\tif vendor != 'NONE':\n\t\t\t\tpacketKey.WPAKeyLength \t= len( vendorInfo )\n\t\t\t\tpacketKey.WPAKey \t= vendorInfo\n\t\t\t\n\t\t\t# Calculate and add the MIC.\n\t\t\tif eapolMIC is True:\n\t\t\t\tmic = HMAC.new( self.KCK , msg=str( packet ) , digestmod=Crypto.Hash.MD5 )\n\t\t\t\tpacketKey.WPAKeyMIC = mic.digest()\n\t\t\t\n\t\t\t# Transmit.\n\t\t\tsendp(RadioTap()/\n\t\t\t\tDot11( addr1=self.addr1 , addr2=self.addr2 , addr3=self.addr1 , type='Data' , subtype=0x00 , FCfield='to-DS' )/\n\t\t\t\tLLC( dsap=0xaa , ssap=0xaa , ctrl=0x03 )/\n\t\t\t\tSNAP( OUI=0x000000 , code=0x888e )/\n\t\t\t\tpacket,\n\t\t\t\tiface=self.iface , verbose=False )\n\t\t\t\t\n\t\texcept:\n\t\t\traise", "def verify_and_respond_open_channel_from_remote_and_send_config_req(self, psm=0x33):\n request = L2capCaptures.ConnectionRequest(psm)\n assertThat(self.control_channel).emits(request)\n\n sid = request.get().GetIdentifier()\n dcid = request.get().GetSourceCid()\n scid = dcid\n channel = 
CertL2capChannel(self._device, scid, dcid, self._acl.acl_stream, self._acl, self.control_channel)\n self.scid_to_channel[scid] = channel\n\n # Connection response and config request combo packet\n conn_rsp_and_config_req = RawBuilder([\n 0x03, sid, 0x08, 0x00, dcid, 0x00, dcid, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, sid + 1, 0x04, 0x00, dcid,\n 0x00, 0x00, 0x00\n ])\n self.control_channel.send(conn_rsp_and_config_req)\n\n return channel", "def fw_handshake_3_4_tkip( self , packet ):\n\t\t# Check if the Frame Check Sequence (FCS) flag is set in the Radiotap header, and\n\t\t# if so assert the correctness of the FCS.\n\t\tradiotapFCSFlag = hasFCS( packet )\n\t\tif radiotapFCSFlag is True:\n\t\t\tassertDot11FCS( packet )\n\t\t\tpacket.getlayer( EAPOL_WPAKey ).remove_payload() # Remove the FCS.\n\t\t\t\n\t\t# Assert on the flags in the Key Information to verify it is FWHS Message 3/4.\n\t\tkeyinfoReceived \t= packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\tself.replayCounter\t= packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\tflaglist\t\t= ['HMAC_MD5_RC4','idx0','pairwise','install','ack','mic']\n\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t'The received packet is not 4-Way Handshake Message 3/4.'\n\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL 4-Way Handshake Message 3/4 TKIP' )\n\t\t\n\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\tself.__assertWPAKeyMIC( packet , Crypto.Hash.MD5 )", "def delcomptcptxpackets(self) :\n\t\ttry :\n\t\t\treturn self._delcomptcptxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def get_connection_info(otu_table_fp, num_meta, meta_dict):\r\n con_by_sample = defaultdict(set)\r\n node_file = []\r\n edge_file = []\r\n red_nodes = defaultdict(int)\r\n red_node_file = []\r\n red_edge_file = []\r\n multi = defaultdict(list)\r\n edge_from = []\r\n to = []\r\n otu_dc = defaultdict(int)\r\n degree_counts = defaultdict(int)\r\n sample_dc = defaultdict(int)\r\n sample_num_seq = defaultdict(int)\r\n con_list = []\r\n\r\n otu_table = parse_biom_table(open(otu_table_fp, 'U'))\r\n\r\n # if lineages == []:\r\n # is_con = False\r\n # else:\r\n # is_con = True\r\n\r\n is_con = False\r\n # This could be moved to OTU table sub-class\r\n if (otu_table.ObservationMetadata is not None and\r\n 'taxonomy' in otu_table.ObservationMetadata[0]):\r\n is_con = True\r\n\r\n for (otu_values, otu_id, otu_metadata) in otu_table.iterObservations():\r\n # for idx,l in enumerate(otu_table):\r\n # data = l\r\n\r\n #to_otu = otu_ids[idx]\r\n con = ''\r\n if is_con:\r\n #con = ':'.join(lineages[idx][:6])\r\n con = ':'.join(otu_metadata['taxonomy'][:6])\r\n con = con.replace(\" \", \"_\")\r\n con = con.replace(\"\\t\", \"_\")\r\n # Not required: otu_values (data) is always numpy vector\r\n #counts = map(float,data)\r\n if con not in con_list:\r\n con_list.append(con)\r\n #non_zero_counts = nonzero(counts)[0]\r\n non_zero_counts = otu_values.nonzero()[0]\r\n degree = len(non_zero_counts)\r\n weighted_degree = sum(otu_values)\r\n# node_file_line = [to_otu,'','otu_node',str(degree),\\\r\n# str(weighted_degree),con]\r\n node_file_line = [otu_id, '', 'otu_node', str(degree),\r\n str(weighted_degree), con]\r\n node_file_line.extend(['otu'] * num_meta)\r\n node_file.append('\\t'.join(node_file_line))\r\n\r\n if len(non_zero_counts) != 1:\r\n red_node_file.append('\\t'.join(node_file_line))\r\n\r\n otu_dc[degree] += 1\r\n degree_counts[degree] += 1\r\n #samples = [sample_ids[i] for i in non_zero_counts]\r\n 
samples = [otu_table.SampleIds[i] for i in non_zero_counts]\r\n for i, s in enumerate(samples):\r\n if s not in meta_dict.keys():\r\n continue\r\n con_by_sample[s].update(samples[0:i])\r\n con_by_sample[s].update(samples[i + 1:])\r\n #sample_num_seq[s] += float(data[non_zero_counts[i]])\r\n sample_num_seq[s] += float(otu_values[non_zero_counts[i]])\r\n\r\n edge_from.append(s)\r\n # to.append(to_otu)\r\n to.append(otu_id)\r\n meta = meta_dict[s]\r\n meta[1] += 1\r\n #data_num = str(data[non_zero_counts[i]])\r\n data_num = str(otu_values[non_zero_counts[i]])\r\n # edge_file.append('\\t'.join([s, to_otu, \\\r\n # data_num, con, meta[0]]))\r\n edge_file.append('\\t'.join([s, otu_id,\r\n data_num, con, meta[0]]))\r\n #multi[to_otu].append((s,float(data[non_zero_counts[i]]), meta[0]))\r\n multi[otu_id].append(\r\n (s, float(otu_values[non_zero_counts[i]]), meta[0]))\r\n if len(non_zero_counts) == 1:\r\n #red_nodes[(sample_ids[non_zero_counts[0]],meta[0])] += degree\r\n red_nodes[(\r\n otu_table.SampleIds[non_zero_counts[0]],\r\n meta[0])] += degree\r\n else:\r\n # red_edge_file.append('\\t'.join([s, to_otu, \\\r\n # data_num, con, meta[0]]))\r\n red_edge_file.append('\\t'.join([s, otu_id,\r\n data_num, con, meta[0]]))\r\n\r\n num_otu_nodes = len(node_file)\r\n for s in meta_dict:\r\n meta = meta_dict[s]\r\n degree = meta[1]\r\n sample_dc[degree] += 1\r\n degree_counts[degree] += 1\r\n weighted_degree = sample_num_seq[s]\r\n node_file_line = '\\t'.join([s, s, 'user_node', str(meta[1]),\r\n str(weighted_degree), 'other', meta[0]])\r\n node_file.append(node_file_line)\r\n red_node_file.append(node_file_line)\r\n\r\n for n, d in red_nodes.items():\r\n red_node_file_line = ['@' + n[0], '',\r\n 'otu_collapsed', str(d), str(float(d)), 'other']\r\n red_node_file_line.extend(['otu'] * num_meta)\r\n red_node_file.append('\\t'.join(red_node_file_line))\r\n red_edge_file.append(\r\n '\\t'.join([n[0], '@' + n[0], \"1.0\", \"missed\", n[1]]))\r\n\r\n return con_by_sample, node_file, edge_file, red_node_file,\\\r\n red_edge_file, otu_dc, degree_counts, sample_dc", "def connectionoptions(self, tokens):\n\n return self.process_value_pairs(tokens, \"connectionoptions\")", "def fw_handshake_4_4_tkip( self , eapolMIC = True , eapolMICFlag = True , customFlaglist = None , addNonce = None , customRC = None , addData = None ):\n\t\tparameterList = 'eapolMIC=' + str(eapolMIC) + ',eapolMICFlag=' + str(eapolMICFlag) + ',customFlaglist=' + str(customFlaglist) + ',addNonce=' + str(addNonce) + ',customRC=' + str(customRC) + ',addData=' + str(addData)\n\t\tself.logger.log( self.logger.TRANSMIT , 'EAPOL 4-Way Handshake Message 4/4 TKIP (' + parameterList + ')')\n\t\ttry:\n\t\t\t\t\t\t\n\t\t\t# Create an empty EAPOL WPA Key packet.\n\t\t\tpacket \t\t= EAPOL( version=1 , type='EAPOL-Key' )/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tpacketKey \t= packet.getlayer( EAPOL_WPAKey )\n\t\t\tflaglist \t= ['HMAC_MD5_RC4','idx0','pairwise']\n\t\t\tif eapolMICFlag is True:\n\t\t\t\tflaglist.append('mic')\n\t\t\t\n\t\t\t# Fill in the fields.\n\t\t\tif customFlaglist is not None:\n\t\t\t\tflaglist = customFlaglist\n\t\t\tpacketKey.KeyInfo = self.__getKeyInformation( flaglist )\n\t\t\tif customRC is not None:\n\t\t\t\tif customRC == 'lower':\t\n\t\t\t\t\tself.replayCounter -= 1\n\t\t\t\telif customRC == 'higher':\n\t\t\t\t\tself.replayCounter += 1\n\t\t\tpacketKey.ReplayCounter = self.replayCounter\n\t\t\tif addNonce is not None:\n\t\t\t\tif addNonce == 'supplicant':\n\t\t\t\t\tpacketKey.Nonce = self.SNonce\n\t\t\t\tif addNonce == 
'authenticator':\n\t\t\t\t\tpacketKey.Nonce = self.ANonce\n\t\t\t\tif addNonce == 'random':\n\t\t\t\t\tpacketKey.Nonce = binascii.a2b_hex( os.urandom( 32 ).encode('hex') )\n\t\t\tif addData is not None:\n\t\t\t\tif addData == 'data':\n\t\t\t\t\tpacketKey.WPAKeyLength \t= 32\n\t\t\t\t\tpacketKey.WPAKey \t= binascii.a2b_hex( os.urandom( 32 ).encode('hex') )\n\t\t\t\tif addData == 'dataNoLength':\n\t\t\t\t\tpacketKey.WPAKeyLength \t= 0\n\t\t\t\t\tpacketKey.WPAKey \t= binascii.a2b_hex( os.urandom( 32 ).encode('hex') )\n\t\t\t\tif addData == 'dataShortLength':\n\t\t\t\t\tpacketKey.WPAKeyLength \t= 16\n\t\t\t\t\tpacketKey.WPAKey \t= binascii.a2b_hex( os.urandom( 32 ).encode('hex') )\n\t\t\t\tif addData == 'dataLongLength':\n\t\t\t\t\tpacketKey.WPAKeyLength \t= 48\n\t\t\t\t\tpacketKey.WPAKey \t= binascii.a2b_hex( os.urandom( 32 ).encode('hex') )\n\t\t\t\n\t\t\t# Calculate and add the MIC.\n\t\t\tif eapolMIC is True:\n\t\t\t\tmic = HMAC.new( self.KCK , msg=str( packet ) , digestmod=Crypto.Hash.MD5 )\n\t\t\t\tpacketKey.WPAKeyMIC = mic.digest()\n\t\t\t\n\t\t\t# Transmit.\n\t\t\tsendp(RadioTap()/\n\t\t\t\tDot11( addr1=self.addr1 , addr2=self.addr2 , addr3=self.addr1 , type='Data' , subtype=0x00 , FCfield='to-DS' )/\n\t\t\t\tLLC( dsap=0xaa , ssap=0xaa , ctrl=0x03 )/\n\t\t\t\tSNAP( OUI=0x000000 , code=0x888e )/\n\t\t\t\tpacket,\n\t\t\t\tiface=self.iface , verbose=False )\n\t\t\t\n\t\texcept:\n\t\t\traise", "def establish_connection(self, data, data_size, acpi, dest_group_addr):\n # -----------------------------------\n # -> (1) Sending Connection request\n # -----------------------------------\n conn_resp_object = self.connection_request()\n # <- Retrieving channel_id & status from Connection response\n conn_channel_id = conn_resp_object.channel_id\n conn_status = conn_resp_object.status\n self.channel_id = conn_channel_id\n print('Channel ID: ', conn_channel_id)\n print('Channel status: ', conn_status)\n print('-----------------------------------')\n # -----------------------------------\n # -> (2) Sending Connection State request\n # -----------------------------------\n state_resp_object = self.connection_state_request()\n # <- Retrieving channel_id & status from Connection State response\n state_channel_id = state_resp_object.channel_id\n state_status = state_resp_object.status\n print('Channel ID: ', state_channel_id)\n print('Channel status: ', state_status)\n print('-----------------------------------')\n # -----------------------------------\n # -> (3) Tunneling request\n # -----------------------------------\n tunnel_resp_object = self.tunneling_request(data, data_size, dest_group_addr, acpi)\n # <- Retrieving data from Tunneling response\n tunnel_channel_id = tunnel_resp_object.channel_id\n tunnel_status = tunnel_resp_object.status\n self.sequence_counter = tunnel_resp_object.sequence_counter\n print('Channel ID: ', tunnel_channel_id)\n print('Channel status: ', tunnel_status)\n print('Sequence counter: ', self.sequence_counter)\n print('-----------------------------------')\n # -----------------------------------\n # -> (4) Tunneling request read\n # -----------------------------------\n self.tunneling_request_read()", "def gk_handshake_1_2_tkip( self , packet ):\n\t\ttry:\n\t\t\t\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext \t\t= self.handleTKIP.decapsulate( packet , self.TK , self.MMICTxK )\n\t\t\tpacket \t\t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t\t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the 
flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_MD5_RC4','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 TKIP' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.MD5 )\n\t\t\t\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Use ARC4 to decrypt the WPAKey-field, containing the Group Temporal Key.\n\t\t\t# First skip the first 256 bytes of ARC4, then decrypt the cipher.\n\t\t\t# Ref. IEEE 802.11i specification (2004); EAPOL-Key frames (Key Descriptor\n\t\t\t# Version 1).\n\t\t\tkey\t\t= new_packet.KeyIV + self.KEK\n\t\t\tarc4\t\t= ARC4.new( key )\n\t\t\tarc4.decrypt( '\\x00'*256 )\n\t\t\tself.GTK \t= arc4.decrypt( new_packet.WPAKey ) # Resulting key of 32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise", "def initConnTermFrame(self,referenceID):\r\n # Strip any colons in the mac address\r\n self.referenceID = referenceID\r\n\r\n # Set the frame content\r\n self.content = \"\"\r\n\r\n # Set the content length\r\n self.contentLength = 0\r\n\r\n # Set the correct frame message type\r\n self.mesgType = MULTIPLEXER_CONN_TERM", "def _process(self, buf, ts=None, pkt_num=None):\n\n if not buf:\n return\n self.pkt_num = pkt_num\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n sip = inet_to_str(ip.src)\n dip = inet_to_str(ip.dst)\n fin_flag = tcp.flags & 0x001\n ack_flag = tcp.flags & 0x010\n syn_flag = tcp.flags & 0x002\n rst_flag = tcp.flags & 0x004\n syn_unacceptable_states = [TCPState.ESTABLISHED, TCPState.FIN_WAIT_1, TCPState.FIN_WAIT_2,\n TCPState.CLOSING, TCPState.LAST_ACK]\n data_acceptable_states = [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]\n tcp_opts = dpkt.tcp.parse_opts(tcp.opts) if tcp.opts else None\n tcp_opts = tcp_opts_tuple_list_to_dict(tcp_opts) if tcp_opts else None\n num_pkt_session_pkt = len(self.sessions[self.session_count]) if self.session_count else 0\n\n # Only Window size can change in ACKs (in other words - after SYNs), nothing else like - window-scaling, or\n # MSS, or Selective-SYN can't be changed. 
If present in options after SYN, they should be ignored in my opinion\n # https://superuser.com/questions/966212/does-the-sequence-number-of-tcp-packet-headers-wrap-around\n # TODO: if the seq number of an incoming packet is ahead of the expected one, it should be held for processing\n\n def slide_window():\n\n if len(self.sessions[self.session_count]):\n if sip == self.sip:\n if self._s_mss != -1 and get_tcp_packet_payload_len_with_options(eth) > self._s_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_c_pkt()).data\n rcv_nxt = self._s_rcv_next\n win_left_end = self._s_win_left_edge\n early_pkts = self._s_early_pkts\n other_end_win_size = self._s_win_size\n current_state = self._c_state\n else:\n if self._c_mss != -1 and get_tcp_packet_payload_len_with_options(ip) > self._c_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_s_pkt()).data\n rcv_nxt = self._c_rcv_next\n win_left_end = self._c_win_left_edge\n early_pkts = self._c_early_pkts\n other_end_win_size = self._c_win_size\n current_state = self._s_state\n if self._print_debug_info:\n logger.debug(self.client_server_next_rcv(), tcp_pkt_debug_info(ip))\n prev_tcp = prev_ip.data\n prev_tcp_data_offset = prev_tcp.off * 4\n prev_ip_header_len = prev_ip.hl * 4\n prev_tcp_payload_len = prev_ip.len - (prev_tcp_data_offset + prev_ip_header_len)\n tcp_payload_len = get_tcp_packet_payload_len(ip)\n if (tcp_seq_number_in_window(win_left_end, tcp.seq, other_end_win_size) or\n tcp_seq_number_in_window(win_left_end,\n inc_tcp_seq_number(tcp.seq, tcp_payload_len), other_end_win_size)):\n if inc_tcp_seq_number(tcp.seq, tcp_payload_len) == rcv_nxt:\n \"\"\"\n \n Since there is no new payload sent, just store the tcp packet with empty payload.\n This is going to increase the packet count but not going to add duplicated data\n in session data, by session data here it means actual data sent (after discarding\n the retransmission) to application layer. To do that - we will empty out the payload,\n if the packet has some, then add the packet to the session, else add the empty packet as it is\n to the session. This logic will easily handle the TCP connections supporting\n TCP Timestamp options described in https://tools.ietf.org/html/rfc1323\n \n \"\"\"\n # one case is when seq number is < rcv_nxt but the sender wants to ack more data\n # which means it is sending the same data again but it's acking more received content\n \"\"\"\n 1. packet has data\n a. prev_packet has data\n A. header change (change cur packet and change previous packet) add to list\n B. no header change retransmission ( sum check)\n b. prev_packet has no data\n A. header change (change cur packet only) add to list\n B. no header change retransmission (change cur packet only)\n 2. packet has no data\n a. prev_packet has data\n A. header change (change previous packet only) add to list\n B. no header change (change previous packet only)\n b. prev_packet has no data\n A. header change (sum check) add to list\n B. 
no header change retransmission (sum check)\n \"\"\"\n if prev_tcp.sum == tcp.sum:\n cur_sum = tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack())\n prev_sum = tcp_shasum_calc(prev_ip.src, prev_ip.dst, prev_ip.p, prev_ip.data.pack())\n if cur_sum == prev_sum:\n # covers 1.a.B and 2.b.B\n return\n\n empty_prev_ip = copy.deepcopy(prev_ip)\n empty_prev_tcp = empty_prev_ip.data\n empty_prev_tcp.seq = rcv_nxt\n empty_prev_ip.len -= prev_tcp_payload_len\n empty_prev_tcp.data = b\"\"\n empty_prev_ip = tcp_fix_checksum(empty_prev_ip)\n new_part_ip = copy.deepcopy(ip)\n new_part_tcp = new_part_ip.data\n new_part_tcp.data = b\"\"\n new_part_tcp.seq = rcv_nxt\n new_part_ip.len -= tcp_payload_len\n new_part_ip.sum = 0\n new_part_tcp.sum = 0\n new_part_ip = tcp_fix_checksum(new_part_ip)\n eth.data = new_part_ip\n cur_pkt = eth.pack()\n new_pkt = dpkt.ethernet.Ethernet(cur_pkt)\n new_part_ip = new_pkt.data\n new_part_tcp = new_part_ip.data\n\n \"\"\"\n Checksum comparison logic is kept to discard straight duplicate packets\n without Timestamp Options. These kinds of packets will not serve any purpose.\n If removal of these checksum comparison code blocks is felt necessary, they could\n be removed -- that will add a few extra retransmitted packets -- but that would\n also require updating the testcases built around these code blocks.\n \"\"\"\n if new_part_tcp.sum == empty_prev_tcp.sum:\n # covers 1.b.B\n # covers case 2.a.B\n if tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack()) == tcp_shasum_calc(\n prev_ip.src, prev_ip.dst, prev_ip.p, empty_prev_ip.data.pack()):\n return\n \"\"\"\n needs to be added to list under cases 2.a.A, 2.b.A, 1.a.A and 1.b.A\n cur_pkt is updated earlier\n \"\"\"\n if sip == self.sip:\n if inc_tcp_seq_number(self._c_rcv_next, 1) <= new_part_tcp.ack:\n self._c_rcv_next = new_part_tcp.ack\n else:\n if inc_tcp_seq_number(self._s_rcv_next, 1) <= new_part_tcp.ack:\n self._s_rcv_next = new_part_tcp.ack\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(tcp.seq, rcv_nxt, tcp_payload_len)):\n stale_data_len = seq_numbers_diff(tcp.seq, rcv_nxt)\n win_right_end = inc_tcp_seq_number(win_left_end, other_end_win_size)\n if tcp_seq_number_in_window(rcv_nxt, inc_tcp_seq_number(tcp.seq, tcp_payload_len),\n seq_numbers_diff(rcv_nxt, win_right_end)):\n tcp.data = tcp.data[stale_data_len:]\n else:\n allowed_payload_size = seq_numbers_diff(rcv_nxt, win_right_end)\n remaining_eth = dpkt.ethernet.Ethernet(eth.pack())\n #remaining_ip = eth.data\n #remaining_tcp = remaining_ip.data\n remaining_eth.data.data.seq = inc_tcp_seq_number(tcp.seq, stale_data_len + allowed_payload_size)\n remaining_eth.data.data.data = tcp.data[stale_data_len + allowed_payload_size:]\n remaining_eth.data.len -= stale_data_len + allowed_payload_size\n remaining_eth.data = tcp_fix_checksum(remaining_eth.data)\n #remaining_eth.data = remaining_ip\n tcp.data = tcp.data[stale_data_len: stale_data_len + allowed_payload_size]\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n tcp.sum = 0\n # ip.len -= stale_data_len\n tcp.seq = rcv_nxt\n ip.data = tcp\n ip.sum = 0\n eth.data = ip\n cur_pkt = eth.pack()\n if sip == self.sip:\n self._s_rcv_next = inc_tcp_seq_number(self._s_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n else:\n self._c_rcv_next = inc_tcp_seq_number(self._c_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n elif (current_state in data_acceptable_states and\n 
tcp_seq_number_in_window(rcv_nxt, tcp.seq, other_end_win_size)):\n # hold it for further processing\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), buf))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), buf))\n return\n else:\n return\n self.sessions[self.session_count].append(((ts, self.pkt_num), cur_pkt))\n # as this packet is accepted, might need to update the rwnd size and left end of rwnd\n if sip == self.sip:\n self._c_payload_size += len(eth.data.data.data)\n logger.debug(\"Client send data size: {}. Accepted data size is: {}.\"\n \" Total data sent from client is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._c_payload_size))\n self._c_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._s_rcv_next\n if (not tcp.ack == self._c_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._c_win_left_edge, 1),\n tcp.ack, self._c_win_size)):\n self._c_win_left_edge = tcp.ack\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n else:\n self._s_payload_size += len(eth.data.data.data)\n logger.debug(\"Server send data of size: {}. Accepted data size is: {}.\"\n \" Total data sent from server is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._s_payload_size))\n self._s_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._c_rcv_next\n # left edge is incremented by one because in_window function checks for inclusive seq number\n # starting at left edge but ACK tells what's the next expected seq number, which could be 1 next\n # to the end of window\n if (not tcp.ack == self._s_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._s_win_left_edge, 1),\n tcp.ack, self._s_win_size)):\n self._s_win_left_edge = tcp.ack\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n # check if packet at the head of queue is ready to be processed\n while True:\n if len(early_pkts) == 0:\n break\n (_ts, _pkt_num), _buf = early_pkts.popleft()\n early_eth = dpkt.ethernet.Ethernet(_buf)\n early_ip = early_eth.data\n early_tcp = early_ip.data\n if tcp_seq_number_in_window(early_tcp.seq, rcv_nxt, get_tcp_packet_payload_len(early_ip)):\n # if early_tcp.seq <= rcv_nxt:\n self._process(early_eth.pack(), _ts, _pkt_num)\n else:\n early_pkts.appendleft(((_ts, _pkt_num), early_eth.pack()))\n break\n\n \"\"\"\n TCP flags:0x000 (12 bits)\n [11 10 9 8 7 6 5 4 3 2 1 0]\n - Bit 11 10 9: reserved\n - Bit 8: nonce\n - Bit 7: CWR (Congestion window reduced)\n - Bit 6: ECN-Echo (Explicit Congestion Notification)\n - Bit 5: Urgent\n - Bit 4: ACK\n - Bit 3: Push\n - Bit 2: Reset\n - Bit 1: SYN\n - Bit 0: FIN\n \"\"\"\n\n \"\"\"TCP flags for SYN [000000010111]\"\"\"\n\n prev_c_pkt = dpkt.ethernet.Ethernet(self.get_last_c_pkt()) if self.get_last_c_pkt() else None\n prev_c_tcp = prev_c_pkt.data.data if prev_c_pkt else None\n prev_s_pkt = dpkt.ethernet.Ethernet(self.get_last_s_pkt()) if self.get_last_s_pkt() else None\n prev_s_tcp = prev_s_pkt.data.data if prev_s_pkt else None\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n logger.debug(\"Processing packet number: {} in the current session\".format(self.pkt_num))\n if rst_flag:\n logger.info(\"Received a RESET flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING:\n self.session_count += 1\n self.sessions[self.session_count] = 
[((ts, self.pkt_num), buf)]\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n return\n self._c_state = self._s_state = TCPState.CLOSED\n if self.sip == sip:\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n else:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif syn_flag and (self._c_state in syn_unacceptable_states or self._s_state in syn_unacceptable_states):\n logger.info(\"Received a unacceptable SYN flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts,self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif (self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING and\n self.sip == sip):\n if tcp.flags & 0x017 == 0x002:\n self.session_count += 1\n logger.info(\"number of sessions so far: {}\".format(self.session_count - 1))\n logger.info(\"starting a new session, pkt info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self.sessions[self.session_count] = []\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_state = TCPState.SYN_SENT\n self._s_state = TCPState.SYN_RECEIVED\n self._c_seq = tcp.seq\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._c_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._c_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._c_win_scaling_factor = 0\n self._c_mss = -1\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"SYN flag from: {}:{}. 
Full TCP Flag is: {}\".format(self.sip, self.sp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n\n elif self._c_state == TCPState.SYN_SENT and self._s_state == TCPState.SYN_RECEIVED:\n logger.info(\"TCP packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self.sip == dip:\n exp_ack = inc_tcp_seq_number(prev_c_tcp.seq, 1)\n if not (tcp.flags & 0x017 == 0x012):\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"SYN-ACK flag is not set in the TCP flags: {} from: {}:{}\".format(hex(tcp.flags),\n self.dip, self.dp))\n return\n if tcp.ack == exp_ack:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self._s_rcv_next = exp_ack\n self._s_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._s_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._s_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._s_win_scaling_factor = 0\n self._s_mss = -1\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n logger.info(\"SYN-ACK flag from: {}:{}. Full TCP flag is: {}\".format(\n self.dip, self.dp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n elif prev_s_tcp:\n exp_ack = inc_tcp_seq_number(prev_s_tcp.seq, 1)\n if tcp.flags & 0x017 == 0x010:\n if tcp.ack == exp_ack and tcp.seq == prev_s_tcp.ack:\n self._s_state = self._c_state = TCPState.ESTABLISHED\n self._c_seq = tcp.seq\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self._c_rcv_next = exp_ack\n self._c_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"TCP handshake complete.\")\n else:\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP handshake was not completed.\")\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.ESTABLISHED and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n \"\"\" if ACK flag is off drop the segment as per:\n https://tools.ietf.org/html/rfc793#page-37\n \"\"\"\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n num_pkt_session_pkt = len(self.sessions[self.session_count])\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received a FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self.sip == sip:\n self._c_state = TCPState.FIN_WAIT_1\n else:\n self._s_state = TCPState.FIN_WAIT_1\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_1 and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.dip:\n if inc_tcp_seq_number(prev_c_tcp.seq, max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.FIN_WAIT_2\n self._s_state = 
TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._c_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_1 and self._c_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.sip:\n if inc_tcp_seq_number(prev_s_tcp.seq, max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.FIN_WAIT_2\n self._c_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._s_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_2:\n if sip == self.sip:\n if ack_flag:\n slide_window()\n if self._s_state == TCPState.LAST_ACK:\n if (num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_s_tcp.seq,\n max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._s_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_2:\n if sip == self.dip:\n if ack_flag:\n slide_window()\n if (self._c_state == TCPState.LAST_ACK and\n num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_c_tcp.seq,\n max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._c_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.CLOSING or self._s_state == TCPState.CLOSING:\n if ack_flag:\n slide_window()\n if sip == self.sip and num_pkt_session_pkt < len(self.sessions[self.session_count]):\n if inc_tcp_seq_number(ack_flag and prev_s_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: 
{}\".format(self.get_printable_state()))\n else:\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and \\\n inc_tcp_seq_number(ack_flag and prev_c_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n logger.info(\"Packet didn't match any valid state: {}\".format(tcp_pkt_debug_info(ip)))\n #self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n logger.debug(self.get_printable_state())", "def cleanse(packets):\n pkts = []\n retran = False\n lost = False\n for pkt in packets:\n if len(pkt['data']) > 0:\n # If first packet just add and move on\n if len(pkts) == 0:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is = to this one add this pkt\n elif pkt['tcp']['seq_num'] == next_seq:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is > than this one there is a \n # Retransmission\n elif pkt['tcp']['seq_num'] < next_seq:\n retran = True\n elif pkt['tcp']['seq_num'] > next_seq:\n lost = True\n else:\n pass\n\n return pkts, retran, lost", "def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets", "def connectlist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.CONNECT_ID):\n self.symbol = self.scanner.get_symbol()\n\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n\n while (self.symbol.type == self.scanner.NAME):\n self.connection()\n # Each connection decrements pin count by one\n self.num_input_pin -= 1\n\n # Check right curly bracket ends connections block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.MONITOR_ID):\n # Error Type: missing '}'\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Bad name terminated connections incorrectly\n # Error type: Invalid name\n # Stopping Symbols: 
MONITOR' or 'END' KEYWORD\n self.error(self.NAME_STRING, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Left curly needed after 'CONNECT'\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.NO_CURLY_CONNECT, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])\n\n else:\n # Error: 'CONNECT' keyword required\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.NEED_CONNECT_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])\n\n # Check all input pins have been connected\n if self.error_count == 0:\n if self.num_input_pin != 0:\n # Error: Floating inputs pins\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.FLOATING_INPUT_PIN, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])", "def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections", "def _find_adc_connections(\n self, adc_name: str, config_group: h5py.Group\n ) -> Tuple[Tuple[int, Tuple[int, ...], Dict[str, Any]], ...]:\n config_name = self._parse_config_name(os.path.basename(config_group.name))\n active = self.deduce_config_active_status(config_name)\n\n # initialize conn, brd, and chs\n # conn = list of connections\n # brd = board number\n # chs = list of connect channels of board brd\n #\n conn = []\n\n # Determine connected (brd, ch) combinations\n # scan thru board groups\n for board in config_group:\n # Is it a board group?\n if not bool(re.fullmatch(r\"Boards\\[\\d+\\]\", board)):\n warn(\n f\"'{board}' does not match expected board group name...\"\n f\"not adding to mapping\"\n )\n continue\n\n # get board number\n brd_group = config_group[board]\n try:\n brd = brd_group.attrs[\"Board\"]\n except KeyError:\n raise HDFMappingError(\n self.info[\"group path\"], \"board number attribute 'Board' missing\"\n )\n\n # ensure brd is an int\n if not isinstance(brd, (int, np.integer)):\n warn(\"Board number is not an integer\")\n continue\n elif brd < 0:\n warn(\"Board number is less than 0.\")\n continue\n\n # ensure there's no duplicate board numbers\n if brd in [sconn[0] for sconn in conn]:\n why = (\n f\"HDF5 structure unexpected...'{config_group.name}' defines \"\n f\"duplicate board numbers\"\n )\n\n # error if active, else warn\n if active:\n raise HDFMappingError(self.info[\"group path\"], why=why)\n else:\n warn(why)\n\n # skip adding to conn list\n continue\n\n # scan thru channel groups\n chs = []\n for ch_key in brd_group:\n # Is it a channel group?\n if not bool(re.fullmatch(r\"Channels\\[\\d+\\]\", ch_key)):\n warn(\n f\"'{board}' does not match expected channel group name\"\n f\"...not adding to mapping\"\n )\n continue\n\n # get channel number\n ch_group = brd_group[ch_key]\n try:\n ch = ch_group.attrs[\"Channel\"]\n except KeyError:\n raise HDFMappingError(\n self.info[\"group path\"],\n \"Channel number attribute 'Channel' missing\",\n )\n\n # ensure ch is an int\n if not isinstance(ch, (int, np.integer)):\n warn(\"Channel number is not an integer\")\n continue\n elif ch < 0:\n warn(\"Channel 
number is less than 0.\")\n continue\n\n # define list of channels\n chs.append(ch)\n\n # ensure connected channels are unique\n if len(chs) != len(set(chs)):\n why = (\n f\"HDF5 structure unexpected...'{brd_group.name}' does not \"\n f\"define a unique set of channel numbers...not adding to \"\n f\"`configs` dict\"\n )\n warn(why)\n\n # skip adding to conn list\n continue\n\n # ensure chs is not NULL\n if len(chs) == 0:\n why = (\n f\"HDF5 structure unexpected...'{brd_group.name}' does not \"\n f\"define any valid channel numbers...not adding to \"\n f\"`configs` dict\"\n )\n warn(why)\n\n # skip adding to conn list\n continue\n\n # build subconn tuple with connected board, channels, and\n # acquisition parameters\n subconn = (brd, tuple(chs), {\"bit\": None, \"clock rate\": (None, \"MHz\")})\n\n # add to all connections list\n conn.append(subconn)\n\n return tuple(conn)", "def socket_thread_http_entry(self, msg):\n # V/nsHttp nsHttpConnection::Activate [this=ed6c450 trans=143f3c00 caps=21]\n if msg['message'].startswith('nsHttpConnection::Activate '):\n match = re.search(r'^nsHttpConnection::Activate \\['\n r'this=(?P<connection>[\\w\\d]+) '\n r'trans=(?P<id>[\\w\\d]+)', msg['message'])\n if match:\n connection = match.groupdict().get('connection')\n trans_id = match.groupdict().get('id')\n if trans_id in self.http['requests']:\n self.http['requests'][trans_id]['connection'] = connection\n # V/nsHttp nsHttpConnection::Init this=ed6c450\n elif msg['message'].startswith('nsHttpConnection::Init ') and \\\n 'current_socket' in self.http:\n match = re.search(r'^nsHttpConnection::Init '\n r'this=(?P<connection>[\\w\\d]+)', msg['message'])\n if match:\n connection = match.groupdict().get('connection')\n socket = self.http['current_socket']\n self.http['connections'][connection] = {'socket': socket}\n del self.http['current_socket']\n elif msg['message'].startswith('nsHttpConnection::SetupSSL '):\n match = re.search(r'^nsHttpConnection::SetupSSL (?P<connection>[\\w\\d]+)',\n msg['message'])\n if match:\n connection = match.groupdict().get('connection')\n if connection in self.http['connections']:\n if 'ssl_start' not in self.http['connections'][connection]:\n self.http['connections'][connection]['ssl_start'] = msg['timestamp']\n elif msg['message'].startswith('nsHttpConnection::EnsureNPNComplete '):\n match = re.search(r'^nsHttpConnection::EnsureNPNComplete (?P<connection>[\\w\\d]+)',\n msg['message'])\n if match:\n connection = match.groupdict().get('connection')\n if connection in self.http['connections']:\n if 'ssl_start' in self.http['connections'][connection]:\n self.http['connections'][connection]['ssl_end'] = msg['timestamp']\n elif msg['message'].startswith('nsHttpTransaction::OnTransportStatus ') and \\\n msg['message'].find(' SENDING_TO ') > -1:\n match = re.search(r'^nsHttpTransaction::OnTransportStatus (?P<id>[\\w\\d]+) SENDING_TO ',\n msg['message'])\n if match:\n trans_id = match.groupdict().get('id')\n if trans_id in self.http['requests'] and \\\n 'start' not in self.http['requests'][trans_id]:\n self.http['requests'][trans_id]['start'] = msg['timestamp']\n elif msg['message'].startswith('nsHttpTransaction::OnSocketStatus ') and \\\n msg['message'].find(' status=804b0005 progress=') > -1:\n match = re.search(r'^nsHttpTransaction::OnSocketStatus '\\\n r'\\[this=(?P<id>[\\w\\d]+) status=804b0005 progress=(?P<bytes>[\\d+]+)',\n msg['message'])\n if match:\n trans_id = match.groupdict().get('id')\n byte_count = int(match.groupdict().get('bytes'))\n if byte_count > 0 and trans_id in 
self.http['requests'] and \\\n 'start' not in self.http['requests'][trans_id]:\n self.http['requests'][trans_id]['start'] = msg['timestamp']\n elif msg['message'].startswith('nsHttpTransaction::ProcessData '):\n match = re.search(r'^nsHttpTransaction::ProcessData \\[this=(?P<id>[\\w\\d]+)',\n msg['message'])\n if match:\n trans_id = match.groupdict().get('id')\n self.http['current_socket_transaction'] = trans_id\n elif msg['message'].startswith('nsHttpTransaction::HandleContent '):\n if 'current_socket_transaction' in self.http:\n del self.http['current_socket_transaction']\n match = re.search(r'^nsHttpTransaction::HandleContent \\['\n r'this=(?P<id>[\\w\\d]+) '\n r'count=(?P<len>[\\d]+) read=', msg['message'])\n if match:\n trans_id = match.groupdict().get('id')\n if trans_id in self.http['requests']:\n bytes_in = int(match.groupdict().get('len'))\n if 'first_byte' not in self.http['requests'][trans_id]:\n self.http['requests'][trans_id]['first_byte'] = msg['timestamp']\n if 'end' not in self.http['requests'][trans_id] or \\\n msg['timestamp'] > self.http['requests'][trans_id]['end']:\n self.http['requests'][trans_id]['end'] = msg['timestamp']\n self.http['requests'][trans_id]['bytes_in'] += bytes_in\n self.http['requests'][trans_id]['chunks'].append(\\\n {'ts': msg['timestamp'], 'bytes': bytes_in})\n elif msg['message'].startswith('Http2Stream::Http2Stream '):\n match = re.search(r'^Http2Stream::Http2Stream '\n r'(?P<stream>[\\w\\d]+) '\n r'trans=(?P<id>[\\w\\d]+) ', msg['message'])\n if match:\n stream = match.groupdict().get('stream')\n trans_id = match.groupdict().get('id')\n if stream not in self.http['streams']:\n self.http['streams'][stream] = {}\n if 'trans_id' not in self.http['streams'][stream]:\n self.http['streams'][stream]['request_id'] = trans_id\n elif msg['message'].startswith('Http2Session::RegisterStreamID '):\n match = re.search(r'^Http2Session::RegisterStreamID '\n r'session=[\\w\\d]+ '\n r'stream=(?P<stream>[\\w\\d]+) '\n r'id=(?P<id>0x[\\w\\d]+) ', msg['message'])\n if match:\n stream = match.groupdict().get('stream')\n stream_id = int(match.groupdict().get('id'), 16)\n if stream in self.http['streams']:\n self.http['streams'][stream]['stream_id'] = stream_id\n elif msg['message'].startswith('Http2Stream::UpdatePriorityDependency '):\n match = re.search(r'^Http2Stream::UpdatePriorityDependency '\n r'(?P<stream>[\\w\\d]+) '\n r'depends on stream (?P<parent>0x[\\w\\d]+) ', msg['message'])\n if match:\n stream = match.groupdict().get('stream')\n parent_id = int(match.groupdict().get('parent'), 16)\n if stream in self.http['streams']:\n self.http['streams'][stream]['parent_stream_id'] = parent_id\n elif msg['message'].startswith('Http2Stream '):\n match = re.search(r'^Http2Stream '\n r'(?P<stream>[\\w\\d]+) '\n r'Generating [\\d]+ bytes of HEADERS for '\n r'stream (?P<id>0x[\\w\\d]+) '\n r'with priority weight (?P<weight>[\\d]+) '\n r'dep (?P<parent>0x[\\w\\d]+) ', msg['message'])\n if match:\n stream = match.groupdict().get('stream')\n stream_id = int(match.groupdict().get('id'), 16)\n weight = int(match.groupdict().get('weight'), 10)\n parent_id = int(match.groupdict().get('parent'), 16)\n if stream in self.http['streams']:\n self.http['streams'][stream]['stream_id'] = stream_id\n self.http['streams'][stream]['weight'] = weight\n self.http['streams'][stream]['parent_stream_id'] = parent_id\n elif 'current_socket_transaction' in self.http and \\\n msg['message'].startswith('nsHttpTransaction::ParseLine '):\n trans_id = self.http['current_socket_transaction']\n if 
trans_id in self.http['requests']:\n if trans_id in self.http['requests']:\n if 'first_byte' not in self.http['requests'][trans_id]:\n self.http['requests'][trans_id]['first_byte'] = msg['timestamp']\n if 'end' not in self.http['requests'][trans_id] or \\\n msg['timestamp'] > self.http['requests'][trans_id]['end']:\n self.http['requests'][trans_id]['end'] = msg['timestamp']\n match = re.search(r'^nsHttpTransaction::ParseLine \\[(?P<line>.*)\\]\\s*$',\n msg['message'])\n if match:\n line = match.groupdict().get('line')\n self.http['requests'][trans_id]['response_headers'].append(line)\n elif 'current_socket_transaction' in self.http and \\\n msg['message'].startswith('Have status line '):\n trans_id = self.http['current_socket_transaction']\n if trans_id in self.http['requests']:\n if trans_id in self.http['requests']:\n if 'first_byte' not in self.http['requests'][trans_id]:\n self.http['requests'][trans_id]['first_byte'] = msg['timestamp']\n if 'end' not in self.http['requests'][trans_id] or \\\n msg['timestamp'] > self.http['requests'][trans_id]['end']:\n self.http['requests'][trans_id]['end'] = msg['timestamp']\n match = re.search(r'^Have status line \\[[^\\]]*status=(?P<status>\\d+)',\n msg['message'])\n if match:\n status = int(match.groupdict().get('status'))\n self.http['requests'][trans_id]['status'] = status", "def recv(self, conn):\n flags = {\"Z\": 0, \"B\": 0}\n meta = {\"status\": \"\", \"length\": \"\"}\n def reset():\n \"\"\"\n resets if data corruption detected via mis-matches\n :return: None\n \"\"\"\n for i in flags.keys(): flags[i] = 0\n for i in meta.keys(): meta[i] = \"\"\n return (None, \"CON\")\n while True:\n self.logger.info(conn.recv(1, socket.MSG_PEEK))\n if not len(conn.recv(1, socket.MSG_PEEK)) > 0: return (None, \"CON\")\n self.logger.info(\"Running\")\n i = conn.recv(1)\n if chr(i) == \"Z\" and flags[\"Z\"] < 3:\n flags[\"Z\"] += 1\n continue\n elif flags[\"Z\"] < 3 and chr(i) != \"Z\": reset() #corruption condition\n # puts everything between Z & B into status string\n if flags[\"Z\"] == 3 and chr(i) != \"B\" and len(meta[\"status\"]) < 3:\n meta[\"status\"] += chr(i)\n continue\n # cycles through B's until length\n if chr(i) == \"B\" and flags[\"B\"] < 3:\n flags[\"B\"] += 1\n continue\n elif flags[\"B\"] < 3 and chr(i) != \"B\": reset() #corruption condition\n if flags[\"B\"] == 3 and chr(i) != \"C\":\n meta[\"length\"] += chr(i)\n continue\n if flags[\"B\"] == 3 and chr(i) == \"C\":\n # return tuple (py object, status)\n #super().read(1) #kick \"C\" out of the serial buffer\n self.logger.debug(f\"Attempting to load packet of size {meta['length']}\")\n packet = (\n pickle.loads(conn.recv(int(meta[\"length\"]))),\n meta[\"status\"]\n )\n self.logger.debug(f\"Received Packet of size {sys.getsizeof(packet[0])} Bytes with Network Status {packet[1]}\")\n if packet[1] == \"FIN\":\n self.logger.warning(\"Lost Connection, looking for devices\")\n self.connection = False\n elif packet[1] == \"ACK\" and self.connection:\n #clear buffer of residual ACK packets\n return self.recv()\n return packet", "def post_process(self, packet: 'dict[str, Any]') -> 'MPTCP':\n ret = self.data\n\n ret.option = Enum_Option.Multipath_TCP\n ret.length = self.test['length']\n ret.subtype = Enum_MPTCPOption.get(packet['test']['subtype'])\n\n return ret", "def _check_connectionline(self):\n self.connection_first_device, \\\n self.connection_first_port \\\n = self._check_validconnectionoutput()\n if self._is_arrow(self.symbol):\n # Get next symbol\n self.symbol = self.scanner.get_symbol()\n 
self.connection_second_device, \\\n self.connection_second_port \\\n = self._check_validconnectioninput()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create connection if no previous errors\n connection_error = self._connection_maker(\n self.connection_first_device,\n self.connection_first_port,\n self.connection_second_device,\n self.connection_second_port)\n # Send the returned error ID for error reporting\n self._display_semantic_error(connection_error)\n # Run a while loop to check for possible multiple connections from\n # same output\n while (\n not self._is_semicolon(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n if self._is_comma(self.symbol):\n self.symbol = self.scanner.get_symbol()\n self.connection_second_device, \\\n self.connection_second_port \\\n = self._check_validconnectioninput()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create connection if no previous errors\n connection_error = self._connection_maker(\n self.connection_first_device,\n self.connection_first_port,\n self.connection_second_device,\n self.connection_second_port)\n # Send the returned error ID for error reporting\n self._display_semantic_error(connection_error)\n else:\n # No comma\n self._display_syntax_error(\"comma\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n elif self._is_semicolon(self.symbol):\n self.symbol = self.scanner.get_symbol()\n else:\n # No '->'\n self._display_syntax_error(\"arrow\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n return None", "def dataReceived(self,data):\n if DEBUG: print \"class CommandProtocol, function dataReceived\"\n if data[6:12] == \"status\":\n print self.server.xstatus()\n self.transport.write(\"<XML>\"+self.server.xstatus()+\"</XML>\")\n self.transport.loseConnection()\n return\n if DEBUG and len(data) < 10000: print \"data:\", data\n # on receipt of the first fragment determine message length, extract header info\n # NOTE: this can only handle header lengths smaller than the fragment size - \n # the header MUST arrive in the first fragment\n # append the new data \n self.alldata += data\n if u\"?console\" in data: self.provide_console()\n #requests = 0 #For use with priorities\n if not hasattr(self,'mlength'):\n # attempt to extract the header info with the current message subset\n try: \n self.dataHTTP = HTTPRequest(self.alldata)\n self.boundary = self.dataHTTP.headers['content-type'].split('boundary=')[-1]\n fb = data.find('--' + self.boundary) # find the first used boundary string\n if fb == -1:\n return # if there is none, the header must not be complete\n # if there is a boundary, header must be complete; get header data\n self.mlength = fb + int(self.dataHTTP.headers.dict['content-length'])\n headerItemsforCommand = ['host','origin','referer']\n self.request = {k: self.dataHTTP.headers[k] for k in headerItemsforCommand if k in self.dataHTTP.headers}\n self.request.update({'ctime':self.ctime,'protocol':self})\n # record where this request is coming from\n self.factory.connection_manager.elaborateLog(self,self.request)\n except: return # if unsuccessful, wait for next packet and try again\n \n # if we made it to here, the header has been received\n # if the entirety of message not yet received, append this fragment and continue\n if self.mlength > len(self.alldata):\n return\n # if we have made it here, this is last fragment of message \n # mark the 'all data received' time\n 
self.request.update({'timereceived':time.time()})\n # strip multipart data from incoming HTTP request\n kv = [datas.split('name=\"')[-1].split('\"\\n\\r\\n\\r') for datas in self.alldata.split('--'+self.boundary+'--')]\n self.params = {k:v.rstrip() for k,v in kv[:-1]}\n # insert request, if valid, into command queue (persistently resides in self.Factory) \n #pdb.set_trace()\n #SC=SocketCommand(self.params,self.request)\n SC=commands.SocketCommand(self.params,self.request, self.server.command_library)#CP 2014-10-28\n try:\n self.factory.connection_manager.server.command_queue.add(SC)\n #self.factory.commandQueue.add(SC)\n except AttributeError:\n if DEBUG: print 'Failed to insert SocketCommand in Queue, No Queue'\n raise\n #self.factory.commandQueue=CommandQueue(SC)\n except:\n if DEBUG: print \"Error No command included in request\", SC\n msg = {'Not_Command_text_message':'Failed to insert SocketCommand in Queue, reason unknown','terminator':'die'}\n self.transport.write(simplejson.dumps(msg, ensure_ascii = False).encode('utf8'))\n if DEBUG: print 'Failed to insert SocketCommand in Queue, reason unknown'\n self.transport.loseConnection()\n raise", "def decomptcptxpackets(self) :\n\t\ttry :\n\t\t\treturn self._decomptcptxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def open_ack_msg(conn_id):\n return CCPMessage(CCPMessage.OPEN_CONNECTION_ACK, conn_id=conn_id)", "def __getHeaderInfo(self, decoded_data):\n\t\tip = decoded_data.child()\n\t\ttcp = ip.child()\n\t\t#src = (ip.get_ip_src(), tcp.get_th_sport())\n\t\ttry:\tsrc = ip.get_ip_src()\n\t\texcept:\tsrc = '?'\n\t\t#dst = (ip.get_ip_dst(), tcp.get_th_dport())\n\t\ttry:\tdst = ip.get_ip_dst()\n\t\texcept:\tdst = '?'\n\t\t#data = tcp.get_data_as_string()\n\t\tdata = tcp.get_packet()\n\t\treturn (src, dst, data)", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def get_hsdpa_ack_meas(self, numSubframes=2000):\r\r\n\r\r\n # perform HSDPA ACK measurements\r\r\n\r\r\n loggerCmw = logging.getLogger('get_hsdpa_ack_meas')\r\r\n\r\r\n self.set_num_scheduled_subframes(numSubframes)\r\r\n\r\r\n # determine the timeout per measurement\r\r\n # work out approximate time for numSubframes\r\r\n # (numSubframes * 0.002 = 10)\r\r\n meas_timeout_sec = int ( (self.get_num_scheduled_subframes() * 0.002) + 10 )\r\r\n\r\r\n meas_sample_time_sec = 2\r\r\n\r\r\n 
self.conf_hsdpa_ack_meas(timeout_sec=meas_timeout_sec)\r\r\n\r\r\n self.write('INIT:WCDMA:SIGN:HACK')\r\r\n\r\r\n self.waitForCompletion()\r\r\n\r\r\n num_iter = 0\r\r\n\r\r\n NUM_ITER_MAX = int(math.ceil(meas_timeout_sec/meas_sample_time_sec))\r\r\n\r\r\n loggerCmw.info(\"Obtaining HSDPA measurements for instrument. Please be patient ...\")\r\r\n\r\r\n while ( num_iter < NUM_ITER_MAX ):\r\r\n\r\r\n num_iter += 1\r\r\n\r\r\n loggerCmw.debug(\"FETCHING HSDPA ACK MEAS: iteration %d of %d\" % (num_iter, NUM_ITER_MAX))\r\r\n\r\r\n state=self.read('FETCh:WCDMa:SIGN:HACK:STATe?')\r\r\n\r\r\n loggerCmw.debug(\"FETCH STATE : %s\" % state)\r\r\n\r\r\n if (state == 'RDY') :\r\r\n\r\r\n break\r\r\n\r\r\n loggerCmw.debug(\"Waiting for %02f [sec]... \" % meas_sample_time_sec)\r\r\n\r\r\n time.sleep(meas_sample_time_sec)\r\r\n\r\r\n if state == 'RDY':\r\r\n\r\r\n avgCqi_1_str=self.read('FETCh:WCDMa:SIGN:HACK:MCQI:CARRier1?')\r\r\n\r\r\n avgTput=self.read('FETCh:WCDMa:SIGN:HACK:TRACe:THRoughput:TOTal:AVERage?')\r\r\n\r\r\n avgCqi_1_list = avgCqi_1_str.split(',')\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n avgCqi_2_str=self.read('FETCh:WCDMa:SIGN:HACK:MCQI:CARRier2?')\r\r\n\r\r\n avgCqi_2_list = avgCqi_2_str.split(',')\r\r\n\r\r\n if avgCqi_1_list[0] == \"0\":\r\r\n\r\r\n # valid CQI measurement\r\r\n loggerCmw.debug('Median CQI, carrier 1 : %s' %avgCqi_1_list[1])\r\r\n\r\r\n self.set_medianCqi(carrier=1, val=avgCqi_1_list[1])\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n if avgCqi_2_list[0] == \"0\":\r\r\n\r\r\n # valid CQI measurement\r\r\n loggerCmw.debug('Median CQI, carrier 2 : %s' %avgCqi_2_list[1])\r\r\n\r\r\n self.set_medianCqi(carrier=2, val=avgCqi_2_list[1])\r\r\n\r\r\n hspda_stats_str_1 = self.read('FETCh:WCDMa:SIGN:HACK:THRoughput:CARRier1:ABSolute?')\r\r\n hack_meas_list_1 = hspda_stats_str_1.split(',')\r\r\n loggerCmw.debug('HSDPA ACK stats %s' %hack_meas_list_1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n hspda_stats_str_2 = self.read('FETCh:WCDMa:SIGN:HACK:THRoughput:CARRier2:ABSolute?')\r\r\n hack_meas_list_2 = hspda_stats_str_2.split(',')\r\r\n loggerCmw.debug('HSDPA ACK stats %s' %hack_meas_list_2)\r\r\n\r\r\n self.hsdpa_meas[0].set_results_list(hack_meas_list_1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_meas[1].set_results_list(hack_meas_list_2)\r\r\n\r\r\n\r\r\n self.get_ack_trans_meas(carrier=1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.get_ack_trans_meas(carrier=2)\r\r\n\r\r\n numMeasuredFrames = self.get_measured_subframes()\r\r\n\r\r\n if numMeasuredFrames == self.NO_MEASURED_FRAMES_STR:\r\r\n\r\r\n return 0\r\r\n\r\r\n self.set_hsdpa_measured_subframes(numMeasuredFrames)\r\r\n\r\r\n blerVal = self.get_instr_hsdpa_bler(carrier=1)\r\r\n\r\r\n self.set_hsdpa_bler(blerVal, carrier=1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n blerVal = self.get_instr_hsdpa_bler(carrier=2)\r\r\n\r\r\n self.set_hsdpa_bler(blerVal, carrier=2)\r\r\n\r\r\n\r\r\n return 1", "def connectInfo(self,compInfo, node, nodeDic, numNodesSub,subcktName):\n connInfo = []\n print \"compinfo-------->\",compInfo\n sourcesInfo = self.separateSource(compInfo)\n for eachline in compInfo:\n words = eachline.split()\n print \"eachline----->\",eachline\n print \"eachline[0]------->\",eachline[0]\n if eachline[0]=='r' or eachline[0]=='R' or eachline[0]=='c' or eachline[0]=='C' or eachline[0]=='d' or eachline[0]=='D' \\\n or eachline[0]=='l' or eachline[0]=='L' or eachline[0]=='v' or eachline[0]=='V':\n conn = 'connect(' + words[0] + '.p,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + 
'.n,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='q' or eachline[0]=='Q':\n print \"Inside Transistor--->\"\n print \"Node Dict------>\",nodeDic\n conn = 'connect(' + words[0] + '.C,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.B,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.E,' + nodeDic[words[3]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='m' or eachline[0]=='M':\n conn = 'connect(' + words[0] + '.D,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.G,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.S,' + nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.B,' + nodeDic[words[4]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['f','h','F','H']:\n vsource = words[3]\n sourceNodes = sourcesInfo[vsource]\n sourceNodes = sourceNodes.split()\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[sourceNodes[0]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[sourceNodes[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['g','e','G','E']:\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['g','e','G','E']:\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='x' or eachline[0]=='X':\n templine = eachline.split()\n temp = templine[0].split('x')\n index = temp[1]\n for i in range(0,len(templine),1):\n if templine[i] in subcktName: #Ask Manas Added subcktName in function Call\n subname = templine[i]\n nodeNumInfo = self.getSubInterface(subname, numNodesSub)\n for i in range(0, numNodesSub[subname], 1):\n #conn = 'connect(' + subname + '_instance' + index + '.' + nodeDic[nodeNumInfo[i]] + ',' + nodeDic[words[i+1]] + ');'\n conn = 'connect(' + subname + '_instance' + index + '.' 
+ 'n'+ nodeNumInfo[i] + ',' + nodeDic[words[i+1]] + ');'\n connInfo.append(conn)\n else:\n continue\n if '0' or 'gnd' in node:\n conn = 'connect(g.p,n0);'\n connInfo.append(conn)\n \n return connInfo", "def handle_max_cons_per_ip(self):\n msg = \"Too many connections from the same IP address.\"\n self.respond(\"421 %s\" %msg)\n self.log(msg)\n self.close_when_done()", "def connection_mutation(self, connection_inno, attempts):\n tries = 0\n while tries < attempts:\n tries += 1\n\n # Get Random Nodes\n n1 = self.nodes[random_index(self.nodes.keys())]\n n2 = self.nodes[random_index(self.nodes.keys())]\n\n # Should Reverse\n if (\n (n1.type is NodeType.HIDDEN and n2.type is NodeType.INPUT)\n or (n1.type is NodeType.OUTPUT and n2.type is NodeType.HIDDEN)\n or (n1.type is NodeType.HIDDEN and n2.type is NodeType.INPUT)\n ):\n n1, n2 = n2, n1\n\n # Bad Connection Check 1\n if (\n (n1.type is NodeType.INPUT and n2.type is NodeType.INPUT)\n or (n1.type is NodeType.OUTPUT and n2.type is NodeType.OUTPUT)\n or (n1.id == n2.id)\n ):\n continue\n\n # Bad Connection Check 2\n if n1.layer == n2.layer:\n continue\n\n # Check for Circular Structures\n # List of nodes that should have their connections checked\n needs_checking = []\n # List of nodes that requires output from node2\n node_ids = []\n for con in self.connections.values():\n if con.in_node == n2.id:\n # Connection comes from node2\n needs_checking.append(con.out_node)\n node_ids.append(con.out_node)\n\n while len(needs_checking) > 0:\n node_id = needs_checking.pop(0)\n for con in self.connections.values():\n if con.in_node == node_id:\n # Connection comes from needs_checking node\n needs_checking.append(con.out_node)\n node_ids.append(con.out_node)\n\n # Check if node1 is dependent on node2\n if any(i == n1.id for i in node_ids):\n continue\n\n # Existing or Reverse Existing Connection Check\n if any(\n (con.in_node == n1.id and con.out_node == n2.id)\n or (con.in_node == n2.id and con.out_node == n1.id)\n for con in self.connections.values()\n ):\n continue\n\n self.add_connection(\n Connection(\n id=connection_inno.inc,\n in_node=n1.id,\n out_node=n2.id,\n weight=random(-1, 1),\n enabled=True,\n )\n )\n return True\n\n # print('could not mutate')\n return False", "def callback(self):\n server_addresses = self._address_book.list_by_key(key)\n for address in server_addresses:\n if self._client_logic.connection_error.is_set():\n try:\n connection = socket.create_connection((address[0], 9665))\n self.sident_verify(connection, v_event)\n except socket.error:\n continue\n else:\n return True\n neighbor_addresses = self._client_list.list()\n for address in neighbor_addresses:\n if self._client_logic.connection_error.is_set():\n try:\n connection = socket.create_connection((address[0], address[1]))\n \n\n def sident_verify(self, connection):\n \"\"\"Request the server send a signed verification of its identity with \n IP address, port and timestamp.\n\n sident stands for 'Server Identity'\n\n An sident_verify message is of the following form:\n\n {'type':'sident_verify'\n 'timestamp':<UNIX TIMESTAMP>}\n\n The server should reply with an sident_response message which is of\n the following form:\n\n {'type':'sident_response',\n 'ip_addr':<IP ADDRESS AS A STRING>,\n 'port':<PORT NUMBER AS AN INTEGER>,\n 'timestamp':<UNIX TIMESTAMP>,\n 'signature':<SIGNED DIGEST OF THE THREE PREVIOUS VALUES AS A UTF-8 STRING \n CONCATENATED TOGETHER WITH COMMA SEPERATORS>}\"\"\"\n sident_verify_msg = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n 
self._send_queue.put((sident_verify_msg, connection))\n return True\n\n def request_server_address(self, connection):\n \"\"\"Request the best guess at the current server address from a client\n peer. \n\n P2P nodes use the same JSON messaging style as the normal client and\n server. address_request messages are of the form:\n\n {'type':'address_request'\n 'timestamp':<UNIX TIMESTAMP>}\n\n And a server_address message is of the form:\n\n {'type':'server_address',\n 'key':<CRYPTOGRAPHIC KEY THAT UNIQUELY IDENTIFIES SERVER>,\n 'address':<SERVER ADDRESS>,\n 'port':<WHAT PORT THE SERVER LISTENS ON>,\n 'address_timestamp':<UNIX TIMESTAMP OF WHEN PEER RECEIVED ADDRESS>,\n 'signature':<VERIFICATION THAT INFORMATION CAME FROM SERVER ORIGINALLY>,\n 'timestamp':<UNIX TIMESTAMP OF WHEN MESSAGE WAS SENT>}\"\"\"\n address_request = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((address_request, connection))\n return True\n \n\n def send_loop(self):\n \"\"\"Send loop that is meant to be started from a seperate thread of \n execution. The send loop pulls 'raw' python object messages from this \n objects send_queue attribute and converts them to json strings before \n encoding them as utf-8 to send across the wire. Sent along with the \n message is the connection to send it on.\n\n Responses are handled and received by the receive_loop method of this class\n which is ran in a seperate thread of execution.\"\"\"\n while not self._shutdown.is_set():\n message_tuple = self._send_queue.get()\n message = message_tuple[0]\n message_length = self._calculate_recursive_length(message)\n wrapped_message = [message_length, message]\n wire_message = (json.dumps(wrapped_message) + \"\\r\\n\\r\\n\").encode('utf-8')\n message_tuple[1].sendall(wire_message)\n return True\n\n def receive_loop(self):\n \"\"\"Receive loop that is meant to be started from a seperate thread of\n execution. The receive loop takes in 'raw' utf-8 json messages from the\n wire and decodes them, then interprets them to produce native python \n objects. The resulting objects are then handled by a method of this class\n of the form handle_<message_type>. 
For example if a message with the \n 'type' key 'test' came in like so:\n\n {'type':'test'}\n\n The method self.handle_test(message) would be called with the message\n dictionary object passed along.\n \"\"\"\n msg_buffer = bytes() # The message input buffer\n while not self._shutdown.is_set():\n if msg_buffer:\n try:\n msg_length = self.determine_length_of_json_msg(msg_buffer)\n except InvalidLengthHeader:\n msg_length = float(\"inf\")\n if len(msg_buffer) >= msg_length:\n message = self.extract_msg(msg_buffer, msg_length)\n try:\n handler = getattr(self, \"handle_\" + message['type'])\n except AttributeError:\n print(\"Can't handle message of type: \" +\n str(message['type']))\n continue\n handler(message)\n msg_buffer = msg_buffer[msg_length:]\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n \n def handle_sident_response(message):\n \"\"\"Handle an sident_response type message of the form:\n \n {'type':'sident_response',\n 'ip_addr':<IP ADDRESS AS A STRING>,\n 'port':<PORT NUMBER AS AN INTEGER>,\n 'timestamp':<UNIX TIMESTAMP>,\n 'signature':<SIGNED DIGEST OF THE THREE PREVIOUS VALUES AS A UTF-8 STRING \n CONCATENATED TOGETHER WITH COMMA SEPERATORS>}\n \n The handler verifies that the information given by the server is properly\n signed, then adds the information to address books/etc, and finally \n resolves the issue using provided client logic methods and clears the \n error indicator.\"\"\"\n if self._client_logic.connection_error.is_set():\n try:\n ip_addr = message['ip_addr']\n port = message['port']\n timestamp = message['timestamp']\n signature = message['signature']\n except KeyError:\n return False\n sha_hash = SHA256.new(\n (ip_addr + \",\" + port + \",\" + timestamp).encode('utf-8'))\n if self._key.verify(sha_hash.digest(), signature):\n self._address_book.add_address(self._key, ip_addr, timestamp,\n signature, port=port)\n self._address_book.save()\n if self._client_logic.reconnect(ip_addr, port):\n self._client_logic.connection_error.clear()\n return True\n else:\n return False\n else:\n return False\n\n \n def determine_length_of_json_msg(self, message_bytes):\n \"\"\"Incrementally parse a JSON message to extract the length header.\n\n message_bytes: The bytes that represent the portion of the message \n recieved.\n \"\"\"\n # All messages must be written in utf-8\n message = message_bytes.decode('utf-8')\n # Check that the message we have been given looks like a valid length header\n if \",\" not in message:\n raise InvalidLengthHeader(message)\n length_portion = message.split(\",\")[0]\n left_bracket = length_portion[0] == \"[\"\n number_before_comma = length_portion[-1] in \"1234567890\"\n if left_bracket and number_before_comma:\n for character in enumerate(length_portion):\n if character[1] not in \"[ \\n\\t\\r1234567890,\":\n raise InvalidLengthHeader(length_portion)\n elif character[1] in \"1234567890\":\n length_start = character[0]\n return int(length_portion[length_start:])\n elif left_bracket:\n raise InvalidLengthHeader(length_portion)\n else:\n raise MissingLengthHeader(length_portion)\n return False\n\n def extract_msg(self, msg_buffer, length):\n message = msg_buffer[:length].decode()\n try:\n right_curly_bracket = message[-6] == \"}\" or message[-2] == \"}\"\n except IndexError:\n print(message, msg_buffer, length)\n valid_delimiter = message[-6:] == \"}]\\r\\n\\r\\n\"\n if right_curly_bracket and valid_delimiter:\n return message\n elif 
right_curly_bracket:\n raise InvalidMessageDelimiter(message)\n else:\n raise MissingMessageDelimiter(message)\n\n def _calculate_recursive_length(self, msg_dict):\n \"\"\"Calculate the length of a dictionary represented as JSON once a length\n field has been added as a key.\"\"\"\n delimiter = \"\\r\\n\\r\\n\"\n initial_length = len(\n json.dumps(msg_dict) + delimiter)\n initial_list = [initial_length, msg_dict]\n recursive_length = len(\n json.dumps(initial_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n while len(json.dumps(recursive_list) + delimiter) != recursive_list[0]:\n recursive_length = len(\n json.dumps(recursive_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n return recursive_list[0]", "def get_current_connection_num():\r\n\treturn (len(_clients), MAX_CONNECTION)", "def recalcAck(self):\n\t\t\n\t\t#if self.ackNo < 0:\n\t\t#\tself.ackNo = self.rSegs[0].segNo\n\t\t\n\t\t\n\t\twhile True:\n\t\t\t#Find Target segment\n\t\t\ttarget = self.ackNo + 1\n\t\t\tfound = False\n\t\t\tfor p in self.rSegs:\n\t\t\t\tif target == p.segNo:\n\t\t\t\t\tself.ackNo += 1\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#\n\t\t\tif not found:\n\t\t\t\tbreak", "def genAckList(self):\n\t\tresult = []\n\t\tfor p in self.rSegs:\n\t\t\tresult.append(p.segNo)\n\t\treturn result", "def get_overlapping_conn(conn1: NDArray,\n conn2: NDArray) -> Tuple[NDArray, NDArray]:\n\n conn_union = np.empty((0, 3), dtype=np.int32)\n\n # Get unique components\n if np.ma.is_masked(conn1):\n concomp1 = np.unique(conn1).compressed()\n else:\n concomp1 = np.unique(conn1)\n\n if np.ma.is_masked(conn2):\n concomp2 = np.unique(conn2).compressed()\n else:\n concomp2 = np.unique(conn2)\n\n # Loop through them and connect size and number of overlapping data\n for ix2 in concomp2:\n for ix1 in concomp1:\n # Skip 0 component combination with other components\n if not ix1 == 0 and not ix2 == 0:\n idx = np.where((conn1 == ix1) & (conn2 == ix2))[0]\n if np.count_nonzero(idx) > 0:\n carray = np.array([ix2, ix1, np.count_nonzero(idx)],\n dtype=np.int32, ndmin=2)\n\n conn_union = np.concatenate((conn_union, carray), axis=0)\n\n # Get 0 components in both frames\n elif ix1 == 0 and ix2 == 0:\n idx = np.where((conn1 == ix2) & (conn2 == ix1))[0]\n if np.count_nonzero(idx) > 0:\n carray = np.array([ix2, ix1, np.count_nonzero(idx)],\n dtype=np.int32, ndmin=2)\n\n conn_union = np.concatenate((conn_union, carray), axis=0)\n\n # Find components to correct in Frame 2\n conn_pairs = np.empty((0, 3), dtype=np.int32)\n\n for k in np.unique(conn_union[:, 0]):\n ik = conn_union[:, 0] == k\n # find number of times components is referenced\n count = np.sum(conn_union[:, 0] == k)\n\n if count > 1:\n max_points = np.max(conn_union[ik][:, 2])\n # Select the one with the most points\n ik = np.where((conn_union[:, 0] == k) &\n (conn_union[:, 2] == max_points))[0]\n # Select first if there are more pairs with same num of points\n ik = np.array(ik[0], ndmin=1) if ik.shape[0] > 1 else ik\n\n conn_pairs = np.concatenate((conn_pairs, conn_union[ik]), axis=0)\n\n return conn_pairs", "def handle_packet(self, pkt):\n logger.info('got a message:{}'.format(pkt))\n self._sock_rep_to_server.send_pyobj(packet.Ack())\n \n state = True\n extradata = {}\n \n if hasattr(self, 'handle_action'):\n _tmp = self.handle_action(pkt)\n try:\n state, data = _tmp\n extradata['extra'] = data\n except ValueError:\n extradata['extra'] = _tmp\n if extradata:\n state = False\n \n return state, extradata", "def setupPacket(self):\n return None", 
"def sendConnStatus(self):\n self.sendRunStatus({'name': 'dataConn', 'val': self.dataConns})\n self.sendRunStatus({'name': 'subjectConn', 'val': self.subjectConns})", "def _tcp_reassemble(self, number, src_addr, dst_addr, tcp):\n \n pld = tcp.message[tcp.header_len : tcp.header_len + tcp.segement_len]\n src_socket = (src_addr, tcp.src_port)\n dst_socket = (dst_addr, tcp.dst_port)\n sockets = (src_socket, dst_socket)\n\n def debug_cond(tcp):\n return False\n return True\n return tcp.stream_index == 710\n\n #check the other side of the tcp connection, flush the complete pdu to the msg_list\n if sockets in _tcp_buf and tcp.ack_num != _tcp_buf[sockets].ack: \n self._tcp_flush(sockets)\n del _tcp_buf[sockets]\n if debug_cond(tcp):\n print \"get a new http, decide by %d\" % number\n\n if debug_cond(tcp):\n print \"_tcp_reassemble, number= %d, sequence_num=%d, ack = %d, pldlen=%d, msglen=%d, opt_paddings=%d, iptotal_len=%d, ipheader_len=%d, tcpheader_len=%d\" % (number, tcp.ack_num, len(pld), len(tcp.message), len(tcp.opt_paddings), tcp.ip.total_len, tcp.ip.header_len, tcp.header_len)\n pass\n\n if pld:\n if not sockets in _tcp_buf:\n if debug_cond(tcp):\n print \" add a new message, begin with %d\" % number\n _tcp_buf[sockets] = Message({\n 'pcap_num_list': [],\n 'ts': self.packet_headers[number]['ts'] - self._ts_base,\n 'ip_proto': 'TCP',\n 'src_addr': src_addr,\n 'dst_addr': dst_addr,\n 'src_port': tcp.src_port,\n 'dst_port': tcp.dst_port,\n #'seq': tcp.sequence_num, HUA tcp disorder will generate error\n 'tcp_list': [],\n 'seq_min': 0,\n 'ack': tcp.ack_num,\n 'payload': [],\n 'stream_index': tcp.stream_index, # HUA add a stream index to message\n 'direction': tcp.direction, # HUA add to determin the http is request or response\n 'flag': False\n })\n try:\n _tcp_buf[sockets].ts = self.packet_headers[number]['ts'] - self._ts_base # HUA we should update ts and set it to last\n except:\n print number\n print len(self.packet_headers)\n _tcp_buf[sockets].pcap_num_list.append(number)\n if number == 2246:\n _tcp_buf[sockets].flag = False\n _tcp_buf[sockets].tcp_list.append(tcp)\n #offset = tcp.sequence_num - _tcp_buf[sockets].seq # seq 是相对的\n #_tcp_buf[sockets].payload[offset:offset+len(pld)] = list(pld)", "def get_session_keys(conn, pairing_data):\n headers = {\n 'Content-Type': 'application/pairing+tlv8'\n }\n\n #\n # Step #1 ios --> accessory (send verify start Request) (page 47)\n #\n ios_key = py25519.Key25519()\n\n request_tlv = TLV.encode_list([\n (TLV.kTLVType_State, TLV.M1),\n (TLV.kTLVType_PublicKey, ios_key.pubkey)\n ])\n\n conn.request('POST', '/pair-verify', request_tlv, headers)\n resp = conn.getresponse()\n response_tlv = TLV.decode_bytes(resp.read())\n\n #\n # Step #3 ios --> accessory (send SRP verify request) (page 49)\n #\n assert TLV.kTLVType_State in response_tlv, response_tlv\n assert response_tlv[TLV.kTLVType_State] == TLV.M2\n assert TLV.kTLVType_PublicKey in response_tlv, response_tlv\n assert TLV.kTLVType_EncryptedData in response_tlv, response_tlv\n\n # 1) generate shared secret\n accessorys_session_pub_key_bytes = response_tlv[TLV.kTLVType_PublicKey]\n shared_secret = ios_key.get_ecdh_key(\n py25519.Key25519(pubkey=bytes(accessorys_session_pub_key_bytes), verifyingkey=bytes()))\n\n # 2) derive session key\n hkdf_inst = hkdf.Hkdf('Pair-Verify-Encrypt-Salt'.encode(), shared_secret, hash=hashlib.sha512)\n session_key = hkdf_inst.expand('Pair-Verify-Encrypt-Info'.encode(), 32)\n\n # 3) verify authtag on encrypted data and 4) decrypt\n encrypted = 
response_tlv[TLV.kTLVType_EncryptedData]\n decrypted = chacha20_aead_decrypt(bytes(), session_key, 'PV-Msg02'.encode(), bytes([0, 0, 0, 0]),\n encrypted)\n if decrypted == False:\n raise homekit.exception.InvalidAuth(\"step 3\")\n d1 = TLV.decode_bytes(decrypted)\n assert TLV.kTLVType_Identifier in d1\n assert TLV.kTLVType_Signature in d1\n\n # 5) look up pairing by accessory name\n accessory_name = d1[TLV.kTLVType_Identifier].decode()\n\n if pairing_data['AccessoryPairingID'] != accessory_name:\n raise homekit.exception.IncorrectPairingID(\"step 3\")\n \n accessory_ltpk = py25519.Key25519(pubkey=bytes(), verifyingkey=bytes.fromhex(pairing_data['AccessoryLTPK']))\n\n # 6) verify accessory's signature\n accessory_sig = d1[TLV.kTLVType_Signature]\n accessory_session_pub_key_bytes = response_tlv[TLV.kTLVType_PublicKey]\n accessory_info = accessory_session_pub_key_bytes + accessory_name.encode() + ios_key.pubkey\n if not accessory_ltpk.verify(bytes(accessory_sig), bytes(accessory_info)):\n raise homekit.exception.InvalidSignature(\"step 3\")\n\n # 7) create iOSDeviceInfo\n ios_device_info = ios_key.pubkey + pairing_data['iOSPairingId'].encode() + accessorys_session_pub_key_bytes\n\n # 8) sign iOSDeviceInfo with long term secret key\n ios_device_ltsk_h = pairing_data['iOSDeviceLTSK']\n ios_device_ltsk = py25519.Key25519(secretkey=bytes.fromhex(ios_device_ltsk_h))\n ios_device_signature = ios_device_ltsk.sign(ios_device_info)\n\n # 9) construct sub tlv\n sub_tlv = TLV.encode_list([\n (TLV.kTLVType_Identifier, pairing_data['iOSPairingId'].encode()),\n (TLV.kTLVType_Signature, ios_device_signature)\n ])\n\n # 10) encrypt and sign\n encrypted_data_with_auth_tag = chacha20_aead_encrypt(bytes(), session_key, 'PV-Msg03'.encode(), bytes([0, 0, 0, 0]),\n sub_tlv)\n tmp = bytearray(encrypted_data_with_auth_tag[0])\n tmp += encrypted_data_with_auth_tag[1]\n\n # 11) create tlv\n request_tlv = TLV.encode_list([\n (TLV.kTLVType_State, TLV.M3),\n (TLV.kTLVType_EncryptedData, tmp)\n ])\n\n # 12) send to accessory\n conn.request('POST', '/pair-verify', request_tlv, headers)\n resp = conn.getresponse()\n response_tlv = TLV.decode_bytes(resp.read())\n\n #\n # Post Step #4 verification (page 51)\n #\n if TLV.kTLVType_Error in response_tlv:\n error_handler(response_tlv[TLV.kTLVType_Error], \"verification\")\n assert TLV.kTLVType_State in response_tlv\n assert response_tlv[TLV.kTLVType_State] == TLV.M4\n\n # calculate session keys\n hkdf_inst = hkdf.Hkdf('Control-Salt'.encode(), shared_secret, hash=hashlib.sha512)\n controller_to_accessory_key = hkdf_inst.expand('Control-Write-Encryption-Key'.encode(), 32)\n\n hkdf_inst = hkdf.Hkdf('Control-Salt'.encode(), shared_secret, hash=hashlib.sha512)\n accessory_to_controller_key = hkdf_inst.expand('Control-Read-Encryption-Key'.encode(), 32)\n\n return controller_to_accessory_key, accessory_to_controller_key", "def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:", "def Get_Connection(self, request, context: grpc.ServicerContext) \\\n -> Ot2Controller_pb2.Get_Connection_Responses:\n connection_info = silaFW_pb2.String(value=\"Device IP: \"\n + self.device_ip\n + \", SSH key fingerprint: \"\n + self.pkey.get_fingerprint().hex())\n\n return Ot2Controller_pb2.Get_Connection_Responses(Connection=connection_info)", "def ProtocolInformation(self) -> _n_0_t_7[_n_0_t_6]:", "def run(self):\n\n # Select the packets and formats to send.\n # Array format:\n # 0: self.destination_host\n # 1: self.destination_port\n # 2: version\n # 3: cipher_list\n # 4: cipher_order\n # 5: 
GREASE\n # 6: RARE_APLN\n # 7: 1.3_SUPPORT\n # 8: extension_orders\n\n # Possible versions: SSLv3, TLS_1, TLS_1.1, TLS_1.2, TLS_1.3\n # Possible cipher lists: ALL, NO1.3\n # GREASE: either NO_GREASE or GREASE\n # APLN: either APLN or RARE_APLN\n # Supported Verisons extension: 1.2_SUPPPORT, NO_SUPPORT, or 1.3_SUPPORT\n # Possible Extension order: FORWARD, REVERSE\n\n queue = [\n [self.destination_host, self.destination_port, \"TLS_1.2\", \"ALL\", \"FORWARD\", \"NO_GREASE\", \"APLN\", \"1.2_SUPPORT\", \"REVERSE\"],\n [self.destination_host, self.destination_port, \"TLS_1.2\", \"ALL\", \"REVERSE\", \"NO_GREASE\", \"APLN\", \"1.2_SUPPORT\", \"FORWARD\"],\n [self.destination_host, self.destination_port, \"TLS_1.2\", \"ALL\", \"TOP_HALF\", \"NO_GREASE\", \"APLN\", \"NO_SUPPORT\", \"FORWARD\"],\n [self.destination_host, self.destination_port, \"TLS_1.2\", \"ALL\", \"BOTTOM_HALF\", \"NO_GREASE\", \"RARE_APLN\", \"NO_SUPPORT\", \"FORWARD\"],\n [self.destination_host, self.destination_port, \"TLS_1.2\", \"ALL\", \"MIDDLE_OUT\", \"GREASE\", \"RARE_APLN\", \"NO_SUPPORT\", \"REVERSE\"],\n [self.destination_host, self.destination_port, \"TLS_1.1\", \"ALL\", \"FORWARD\", \"NO_GREASE\", \"APLN\", \"NO_SUPPORT\", \"FORWARD\"],\n [self.destination_host, self.destination_port, \"TLS_1.3\", \"ALL\", \"FORWARD\", \"NO_GREASE\", \"APLN\", \"1.3_SUPPORT\", \"REVERSE\"],\n [self.destination_host, self.destination_port, \"TLS_1.3\", \"ALL\", \"REVERSE\", \"NO_GREASE\", \"APLN\", \"1.3_SUPPORT\", \"FORWARD\"],\n [self.destination_host, self.destination_port, \"TLS_1.3\", \"NO1.3\", \"FORWARD\", \"NO_GREASE\", \"APLN\", \"1.3_SUPPORT\", \"FORWARD\"],\n [self.destination_host, self.destination_port, \"TLS_1.3\", \"ALL\", \"MIDDLE_OUT\", \"GREASE\", \"APLN\", \"1.3_SUPPORT\", \"REVERSE\"],\n ]\n\n # Assemble, send, and decipher each packet.\n for test in queue:\n payload = self._packet_building(test)\n server_hello, ip = self._send_packet(payload)\n\n # Deal with timeout error.\n if server_hello == \"TIMEOUT\":\n self._jarm_raw = \"|||,|||,|||,|||,|||,|||,|||,|||,|||,|||\"\n break\n\n ans = self._read_packet(server_hello, test)\n self._jarm_raw += ans\n self._jarm_raw += \",\"\n\n self._jarm_raw = self._jarm_raw.rstrip(\",\")\n self._jarm_hash = self._calculate_hash(self._jarm_raw)", "def getMessage(self):\n try:\n self.conn, self.addr = self._socket.accept()\n data = self.conn.recv(128000000)\n self.conn.send(\"ACK\")\n except socket.error:\n data = \"no data\"\n return data.split(';')", "def _connectDone(self):\n self.protocol = self.connector.buildProtocol(self.getPeer())\n self.connected = 1\n self.disconnected = 0\n self.disconnecting = 0\n self.logstr = self.protocol.__class__.__name__ + \",client\"\n self.protocol.makeConnection(self)", "def _bluetooth_check_profile_connection(self):\n profiles = dict()\n output = self.dut.get_conn_devices()\n # need to strip all whitespaces.\n conn_devs = {}\n\n for key in output:\n conn_devs[key.strip()] = output[key].strip()\n for key in conn_devs:\n self.logger.info('%s:%s' % (key, conn_devs[key]))\n if 'XXXXXXXX' in conn_devs[key]:\n profiles[key] = conn_devs[key]\n else:\n profiles[key] = False\n return profiles", "def test_10_9_4_2_3_1_3(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n 
self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n open(os.path.join('testcases', 'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # First successful Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)\n del request, response\n\n # operationState is missing\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['response']['responseCode'], 102)", "def comRxHeader(self):\n if DEBUG > 1: sys.stderr.write(\"* comRxHeader()\\n\")\n\n hdr = self.serialport.read(1)\n if not hdr: raise BSLException(\"Timeout\")\n rxHeader = hdr[0] & 0xf0\n rxNum = hdr[0] & 0x0f\n\n if self.protocolMode == self.MODE_BSL:\n self.reqNo = 0\n self.seqNo = 0\n rxNum = 0\n if DEBUG > 1: sys.stderr.write(\"* comRxHeader() OK\\n\")\n return rxHeader, rxNum", "def after_get_onc_ctd(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure\": [],\n \"success SCVIP\": [],\n \"success SEVIP\": [],\n \"success USDDL\": [],\n }\n if msg.type.startswith(\"success\"):\n ctd_stn = msg.type.split()[1]\n next_workers[msg.type].append(\n NextWorker(\"nowcast.workers.ping_erddap\", args=[f\"{ctd_stn}-CTD\"])\n )\n return next_workers[msg.type]", "def process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns, ts_syn_timeout, ts_timeout):\n # The sender of the first SYN is the client\n # Check if the connection is black listed or not\n conn_id = False\n conn_candidates = inverse_conns.get((saddr, sport, daddr, dport), [])\n min_delta = ts_syn_timeout\n for cid in conn_candidates:\n if abs((ts_delta - connections[cid].flow.attr[co.START]).total_seconds()) < min_delta:\n conn_id = cid\n min_delta = abs((ts_delta - connections[cid].flow.attr[co.START]).total_seconds())\n\n if not conn_id:\n black_list.add((saddr, sport, daddr, dport))\n return\n elif conn_id and (saddr, sport, daddr, dport) in black_list:\n black_list.remove((saddr, sport, daddr, dport))\n\n if conn_id not in nb_acks[co.C2S]:\n for direction in co.DIRECTIONS:\n nb_acks[direction][conn_id] = {}\n\n backup = detect_backup_subflow(tcp)\n\n if ((saddr, sport, daddr, dport) in acks and (ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]).total_seconds() <= ts_syn_timeout\n and acks[saddr, sport, daddr, dport][co.S2C] == -1 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]):\n # SYN retransmission!\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, 
dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n else:\n acks[saddr, sport, daddr, dport] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, co.CONN_ID: conn_id,\n SEQ_C2S: set([tcp.seq]), SEQ_S2C: set([]), HSEQ_C2S: {tcp.seq: [ts_delta, ts_delta]}, HSEQ_S2C: {}}\n connections[conn_id].attr[co.BACKUP] = backup", "def extract_connection_properties(self):\n try:\n localdict = {\"MQTT\" : {\\\n \"address\" : self.config_handle['MQTT']['MQTTBrokerAddress'],\\\n \"port\" : int(self.config_handle['MQTT']['MQTTBrokerPort']),\\\n \"topic\" : self.config_handle['MQTT']['MQTTTopic']},\\\n \"KAFKA\" : {\\\n \"address\" : self.config_handle['Kafka']['KafkaBrokerAddress'],\\\n \"port\" : int(self.config_handle['Kafka']['KafkaBrokerPort']),\\\n \"topic\" : self.config_handle['Kafka']['KafkaTopic']}\\\n }\n self.connection_information.append(localdict)\n except:\n print(\"Error in extracting connection properties\")", "def handle_packet(self, clw, address, connection, channel, name, packet, db):\n command = clw.head.command\n request = None\n if command == GATEWAY.T_MESSAGE_TYPE.LOGIN: # T1\n logging.info(\"[GW] Thread%s recv login packet:\\n%s\", name, packet)\n login.handle_login(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis) \n elif command == GATEWAY.T_MESSAGE_TYPE.HEARTBEAT: # T2\n logging.info(\"[GW] Thread%s recv heartbeat packet:\\n%s\", name, packet)\n heartbeat.handle_heartbeat(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.LOCATIONDESC: # T10\n logging.info(\"[GW] Thread%s recv locationdesc packet:\\n%s\", name, packet)\n locationdesc.handle_locationdesc(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.CONFIG: # T17\n logging.info(\"[GW] Thread%s recv query config packet:\\n%s\", name, packet)\n config.handle_config(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.DEFENDSTATUS: # T18, #NOTE: deprecated\n logging.info(\"[GW] Thread%s recv defend status packet:\\n%s\", name, packet)\n defend.handle_defend(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.FOBINFO: # T19 #NOTE: deprecated \n logging.info(\"[GW] Thread%s recv fob info packet:\\n%s\", name, packet)\n fob.handle_fob_info(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.SLEEPSTATUS: # T21\n logging.info(\"[GW] Thread%s recv sleep status packet:\\n%s\", name, packet)\n sleep.handle_sleep(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.FOBSTATUS: # T22, #NOTE: deprecated\n logging.info(\"[GW] Thread%s recv fob status packet:\\n%s\", name, packet)\n fob.handle_fob_status(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.RUNTIMESTATUS: # T23\n logging.info(\"[GW] Thread%s recv runtime status packet:\\n%s\", name, packet)\n runtime.handle_runtime(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == 
GATEWAY.T_MESSAGE_TYPE.UNBINDSTATUS: # T24\n logging.info(\"[GW] Thread%s recv unbind status packet:\\n%s\", name, packet)\n unbind.handle_unbind_status(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.UNUSUALACTIVATE: # T27\n logging.info(\"[GW] Thread%s recv unusual activate packet:\\n%s\", name, packet)\n unusual.handle_unusual(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.MISC: # T28\n logging.info(\"[GW] Thread%s recv misc packet:\\n%s\", name, packet)\n misc.handle_misc(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.ACC_STATUS: # T30\n logging.info(\"[GW] Thread%s recv power status packet:\\n%s\", name, packet)\n acc.handle_acc_status(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n elif command == GATEWAY.T_MESSAGE_TYPE.ACC_STATUS_REPORT: # T31\n logging.info(\"[GW] Thread%s recv power status report packet:\\n%s\", name, packet)\n acc.handle_acc_status_report(clw, address, connection, channel, self.exchange, self.gw_binding, db, self.redis)\n else: #NOTE: otherswill be forwar to SI server\n #(T13, T14, T15, T16, T26, T29) \n logging.info(\"[GW] Thread%s recv packet from terminal:\\n%s\", name, packet)\n self.foward_packet_to_si(clw, packet, address, connection, channel, db)", "def add_new_connections(session, cobj, conn_list, at_date):\n start_at = int(at_date.gps)\n data = []\n\n for conn in conn_list:\n cobj.connection(\n upstream_part=conn[0],\n up_part_rev=conn[1],\n downstream_part=conn[3],\n down_part_rev=conn[4],\n upstream_output_port=conn[2],\n downstream_input_port=conn[5],\n start_gpstime=start_at,\n stop_gpstime=None,\n )\n print(\"Starting connection {} at {}\".format(cobj, str(at_date)))\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n cobj.start_gpstime,\n \"upstream_part\",\n cobj.upstream_part,\n ]\n )\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n cobj.start_gpstime,\n \"up_part_rev\",\n cobj.up_part_rev,\n ]\n )\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n cobj.start_gpstime,\n \"downstream_part\",\n cobj.downstream_part,\n ]\n )\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n cobj.start_gpstime,\n \"down_part_rev\",\n cobj.down_part_rev,\n ]\n )\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n cobj.start_gpstime,\n \"upstream_output_port\",\n cobj.upstream_output_port,\n ]\n )\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n cobj.start_gpstime,\n \"downstream_input_port\",\n cobj.downstream_input_port,\n ]\n )\n data.append(\n [\n cobj.upstream_part,\n cobj.up_part_rev,\n cobj.downstream_part,\n cobj.down_part_rev,\n cobj.upstream_output_port,\n cobj.downstream_input_port,\n 
cobj.start_gpstime,\n \"start_gpstime\",\n cobj.start_gpstime,\n ]\n )\n with mc.MCSessionWrapper(session=session) as session:\n update_connection(session, data, True)", "def get_connections_out(self) -> dict:\n return self.__ni_out", "def handle_connect(self):\n #print \"Switch initiated on: %s:%s\" % (self.address, self.port)\n self.buffer.append(messages.of_hello)\n self.buffer.append(messages.of_features_request)\n self.buffer.append(messages.of_set_config)", "def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def __detect_connected_sap (port):\n connected_port = [l.dst for u, v, l in\n nffg.real_out_edges_iter(port.node.id)\n if str(l.src.id) == str(port.id)]\n # If the number of detected nodes is unexpected continue to the next req\n if len(connected_port) < 1:\n log.warning(\"Skip edge rebinding: No connected node is detected for \"\n \"SAP port: %s\" % port)\n return None\n elif len(connected_port) > 1:\n log.warning(\"Skip edge rebinding: Multiple connected nodes are \"\n \"detected for SAP port: %s: %s!\" % (port, connected_port))\n return None\n elif connected_port[0].node.type == NFFG.TYPE_SAP:\n return connected_port[0]\n else:\n return None" ]
[ "0.5511609", "0.5456236", "0.5397558", "0.53674424", "0.5341668", "0.5300616", "0.52610534", "0.5249031", "0.5168756", "0.5108904", "0.5044106", "0.5025528", "0.50196034", "0.49723715", "0.4895106", "0.48823294", "0.4870531", "0.48611027", "0.4818591", "0.4818538", "0.48022884", "0.47958198", "0.47927332", "0.47881642", "0.47863764", "0.4780633", "0.47642824", "0.47637206", "0.47362474", "0.47344252", "0.4733505", "0.47299334", "0.46874666", "0.46688387", "0.4656648", "0.4653164", "0.46393612", "0.4632015", "0.46237725", "0.461883", "0.46162006", "0.46118358", "0.46005553", "0.45839137", "0.4580058", "0.45794374", "0.45708713", "0.45704174", "0.45699173", "0.45693338", "0.45569298", "0.45465207", "0.45454216", "0.45428136", "0.4540007", "0.45258185", "0.45094696", "0.45043573", "0.44852278", "0.44816178", "0.44776854", "0.44774503", "0.44762188", "0.44682354", "0.44538766", "0.44525126", "0.4447567", "0.4445286", "0.4444873", "0.4442126", "0.4441963", "0.44384834", "0.44383335", "0.4434659", "0.44261512", "0.44249302", "0.44215363", "0.44168442", "0.4413266", "0.44087788", "0.4406235", "0.44012785", "0.4400453", "0.44004443", "0.43957037", "0.43930763", "0.4385698", "0.4383582", "0.4380101", "0.43792287", "0.4377147", "0.43757975", "0.43740037", "0.43689838", "0.43683213", "0.43675902", "0.43641675", "0.4360766", "0.435974", "0.43577683" ]
0.7726427
0
Report the number of payload bytes cumulatively exchanged over each MPTCP connection
def payload_data(self, pkts):
    # Get all the payload bytes exchanged over MPTCP connections
    payload_bytes = 0
    print "Determining the number of payload bytes excluding headers..."
    # DSS = 0x2
    for i in range(len(pkts)):
        if (TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in pkts[i]):
            payload_bytes += len(pkts[i][Raw].load)
            #print("DSN: %s; subflow_seqnum: %s; Data(bytes): %s" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))

    print "Total number of payload bytes in the file (entire MPTCP connections, excluding headers): %s" % (payload_bytes)

    # MPTCP with subflow connections
    # MPTCP_JOINs = 0x1
    print "============================================================"
    print "SUBFLOW connections with their respective MPTCP connection (identified by connectionID)"
    for i in range(len(pkts)):
        # Initial Join message
        # rcv_token identifies the connection to which the subflow belongs: connectionID
        if (MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):
            print("New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))
    # TODO: track per-connection and per-subflow state
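The closing TODO — per-connection and per-subflow bookkeeping — is what the query actually asks for, so a minimal sketch of that accounting follows. It assumes the same scapy MPTCP-fork layer names as the document (`TCPOption_MP`, `MPTCP_JoinSYN`, `IP`, `TCP`, `Raw`) and that each MP_JOIN SYN is observed before data flows on its subflow; deriving the initial subflow's token from the MP_CAPABLE keys (SHA-1 of the key, as done in the `mptcp_connections` snippet among the negatives below) is deliberately left out, so those bytes land in a catch-all bucket.

from collections import defaultdict

def payload_per_connection(pkts):
    token_by_flow = {}                # (src, sport, dst, dport) -> connectionID
    payload_bytes = defaultdict(int)  # connectionID -> cumulative payload bytes

    for pkt in pkts:
        if TCPOption_MP not in pkt:
            continue
        mp = pkt[TCPOption_MP].mptcp
        flow = (pkt[IP].src, pkt[TCP].sport, pkt[IP].dst, pkt[TCP].dport)

        if MPTCP_JoinSYN in pkt and mp.subtype == 1:
            # MP_JOIN SYN: remember both directions of the subflow's
            # 4-tuple under the receiver's token (the connectionID).
            token_by_flow[flow] = mp.rcv_token
            token_by_flow[(flow[2], flow[3], flow[0], flow[1])] = mp.rcv_token
        elif mp.subtype == 2 and Raw in pkt:
            # DSS option carrying data: credit the owning connection.
            # Subflows never seen joining (e.g. the initial MP_CAPABLE
            # subflow) fall into a catch-all bucket here.
            conn_id = token_by_flow.get(flow, "untracked-initial-subflow")
            payload_bytes[conn_id] += len(pkt[Raw].load)

    for conn_id, total in payload_bytes.items():
        print("connectionID %s: %d payload bytes" % (conn_id, total))
    return payload_bytes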
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
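For orientation: the `objective` metadata above marks this record for a triplet-style contrastive objective over (query, document, negatives). The PyTorch sketch below is only an illustration of how such a triple is typically consumed; the `encoder`, the cosine scoring, and the margin value are assumptions for the sketch, not part of this dataset.

import torch
import torch.nn.functional as F

def triplet_loss(encoder, query, document, negatives, margin=0.2):
    # Encode the query, its positive document, and each negative snippet.
    q = encoder(query)                                   # (d,)
    pos = encoder(document)                              # (d,)
    negs = torch.stack([encoder(n) for n in negatives])  # (k, d)

    pos_sim = F.cosine_similarity(q, pos, dim=0)         # scalar
    neg_sim = F.cosine_similarity(q.unsqueeze(0), negs)  # (k,)

    # Hinge on the hardest negative: the positive document must score
    # at least `margin` above every negative.
    return F.relu(margin - pos_sim + neg_sim.max())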
[ "def BytesTransferred(self) -> int:", "def SendPacketsSendSize(self) -> int:", "def delta_bytes(self):\n return sum(self.fcip_doc['packet_lengths'])", "def getPacketCount(self):\n return 1", "def mptcp_connections(self, pkts):\n\t\tcount = 0\n\t\t#MPTCP_Capable = 0x0\n\t\t#MPTCP_CapableACK ---> successful handshake\n\t\tprint \"======================================================================\"\n\t\tprint \"Successful Handshake --- Look for Ack packets with MPTCP option Header\"\n\t\tprint \"\"\"Token = connectionID = SHA1(key)[0-32] of Other party's key. (Capture from\n\t\t either step 2 or 3 in the first handshake)\"\"\"\n\t\tprint \"Total packets: %s\" % len(pkts)\n\t\tprint \"======================================================================\"\n\t\tprint \"Identifying MPTCP Connections....\"\n\t\tfor i in range(len(pkts)):\n\t\t\tif(MPTCP_CapableACK in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 0):\n\t\t\t\tcount +=1 #Count the number of distinct MPTCP connections\n\t\t\t\t\n\t\t\t\t#Compute the receiver's token\n\t\t\t\tself.key_rcv = pkts[i][TCPOption_MP].mptcp.rcv_key\n\t\t\t\tself.rcv_token, self.rcv_dsn = self.key2tokenAndDSN(self.key_rcv)\n\n\t\t\t\t#Compute the sender's token\n\t\t\t\tself.key_snd = pkts[i][TCPOption_MP].mptcp.snd_key\n\t\t\t\tself.snd_token, self.snd_dsn = self.key2tokenAndDSN(self.key_snd)\n\n\t\t\t\tprint (\"%i. New MPTCP Connection (Successful Handshake) src: %s; dest: %s; Sender's key: %s; Receiver's key: %s; Receivers Token (connectionID): %s; Sender's Token: %s\" % (count, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_key, pkts[i][TCPOption_MP].mptcp.rcv_key, self.rcv_token, self.snd_token))\n\t\tprint \"Total MPTCP Connections: %i\" % count", "def pending_nb_bytes(self):\n if self.df_length is not None:\n if self.df_length > 0:\n return self.df_length - len(self.buf)\n\n if self.cf_length is not None:\n if self.cf_length > 0:\n return self.cf_length - len(self.buf)\n \n return 4", "def print_transferred_data(self):\n\n for protocol in self.protocols.itervalues():\n print \"Transfer to peer %d: %d bytes in %d packets\" % \\\n (protocol.peer_id, protocol.sent_bytes, protocol.sent_packets)", "def DownloadedPacketCount(self):\n if self.force_auto_sync:\n self.get('DownloadedPacketCount')\n return self._DownloadedPacketCount", "def comptotalrxpackets(self) :\n\t\ttry :\n\t\t\treturn self._comptotalrxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def CapturedPacketCount(self):\n if self.force_auto_sync:\n self.get('CapturedPacketCount')\n return self._CapturedPacketCount", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def comptotaltxpackets(self) :\n\t\ttry :\n\t\t\treturn self._comptotaltxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def get_total_and_retrans_frames(pcap_filepath, connections):\n # First init values to avoid strange errors if connection is empty\n for conn_id, conn in connections.iteritems():\n for direction in co.DIRECTIONS:\n connections[conn_id].flow.attr[direction][co.FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.FRAMES_RETRANS] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_RETRANS] = 0\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_total\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats(None, pcap_filepath, 
print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n # Manage case with ipv6\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_TOTAL] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_TOTAL] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_TOTAL] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_TOTAL] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_retrans\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats('tcp.analysis.retransmission', pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_RETRANS] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_RETRANS] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_RETRANS] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_RETRANS] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)", "def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()", "def comptotalrxbytes(self) :\n\t\ttry :\n\t\t\treturn self._comptotalrxbytes\n\t\texcept Exception as e:\n\t\t\traise e", "def __len__(self):\n # replica + max wait + min bytes + len(topics)\n size = self.HEADER_LEN + 4 + 4 + 4 + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + fetch offset + max bytes => for each partition\n size += (4 + 8 + 4) * len(parts)\n return size", "def network_byte_length(self) -> int:", "def rx_packet_count(self):\n return self._rx_packet_count", "def bytes_pumped(self):\n return self.j_pump.bytesPumped()", "def determine_number_of_packets(self):\n self.Ltot = 4. * np.pi * np.sum(self.eta * self.dV)\n self.L = self.Ltot / float(self.Npackets)\n\n self.npackets_cell = (4. 
* np.pi * self.eta * self.dV /\n self.L).astype(np.int)\n self.npackets_cell_cum_frac = (\n np.cumsum(self.npackets_cell).astype(np.float) /\n np.sum(self.npackets_cell))", "def getconnectioncount(self):\n return self.proxy.getconnectioncount()", "def bytes_transferred(self):\n return self._bytes_transferred", "def payload_length(self):\n return self.total_length - self.headers_length - _PRELUDE_LENGTH - 4", "def get_bytes(records):\n return sum(r.transferred for r in records)", "def __len__(self):\n # Header + replicaId + len(topics)\n size = self.HEADER_LEN + 4 + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + fetch offset + max bytes => for each partition\n size += (4 + 8 + 4) * len(parts)\n return size", "def snmpqosqos_sch_sessions_byte_countrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_sessions_byte_countrate\n\t\texcept Exception as e:\n\t\t\traise e", "def __len__(self):\n size = self.HEADER_LEN + 2 + 4 + 4 # acks + timeout + len(topics)\n for topic, parts in iteritems(self.msets):\n # topic name\n size += 2 + len(topic) + 4 # topic name + len(parts)\n # partition + mset size + len(mset)\n size += sum(4 + 4 + len(mset) for mset in itervalues(parts))\n return size", "def sync(self):\n self.send()\n detail_count = summary_count = 0\n while self.responses:\n response = self.responses[0]\n while not response.complete:\n detail_delta, summary_delta = self.fetch()\n detail_count += detail_delta\n summary_count += summary_delta\n return detail_count, summary_count", "def pcp_process_count(self):\n\n\t\tif self.PCPConnectionStatus() != ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\t\t\n\t\tself._PCPWrite('N'.encode(), 1)\n\t\twsize = self.int_to_bytes(4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"N\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('N')", "def pcp_node_count(self):\n\n\t\tif self.PCPConnectionStatus() != ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\t\t\n\t\tself._PCPWrite('L'.encode(), 1)\n\t\twsize = self.int_to_bytes(4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"L\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('L')", "def bufferCnt():\n if(reset == 1):\n bufferCounter.next = 0\n else:\n if(decimationRatio > 0):\n if(bufferCounter == (decimationRatio-1)):\n bufferCounter.next = 0\n else:\n bufferCounter.next = bufferCounter + 1", "def snmpqosqos_sch_sessions_byte_count(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_sessions_byte_count\n\t\texcept Exception as e:\n\t\t\traise e", "def __len__(self):\n count = 0\n for recovery_set in self.recovery_sets.values():\n count += len(recovery_set.packets)\n return count", "def comptcptotalrxpackets(self) :\n\t\ttry :\n\t\t\treturn self._comptcptotalrxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def get_size(self):\n return len(self.get_payload()) + 4", "def __len__(self):\n # Header + group id + session timeout\n size = self.HEADER_LEN + 2 + len(self.group_id) + 4\n # + member id + protocol type + len(group protocols)\n size += 2 + 
len(self.member_id) + 2 + len(self.protocol_type) + 4\n # metadata tuples\n for name, metadata in self.group_protocols:\n size += 2 + len(name) + 4 + len(metadata)\n return size", "def __len__(self):\n return self.HEADER_LEN + 4 + sum(len(t) + 2 for t in self.topics)", "def comptotaltxbytes(self) :\n\t\ttry :\n\t\t\treturn self._comptotaltxbytes\n\t\texcept Exception as e:\n\t\t\traise e", "def __len__(self):\n if self.compression_type == CompressionType.NONE:\n messages = self._messages\n else:\n # The only way to get __len__ of compressed is to compress.\n # Store that so we don't have to do it twice\n if self._compressed is None:\n self._compressed = self._get_compressed()\n messages = [self._compressed]\n return (8 + 4) * len(messages) + sum(len(m) for m in messages)", "def packet_count(request):\n return request.param", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def get_bytes_consumed(self):\n total = 0\n for event in self.iter_events(EVENT_NAME_BYTES_CONSUMED):\n total += event.data[\"bytes_consumed\"]\n\n return total", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def ReceiveBufferSize(self) -> int:", "def ReceiveBufferSize(self) -> int:", "def input_mb(self):\n total_input_bytes = sum([t.remote_mb_read + t.local_mb_read for t in self.tasks if t.has_fetch])\n total_input_bytes += sum([t.input_mb for t in self.tasks])\n return total_input_bytes", "def AttemptDownloadPacketCount(self):\n if self.force_auto_sync:\n self.get('AttemptDownloadPacketCount')\n return self._AttemptDownloadPacketCount", "def rtt_get_num_up_buffers(self):\n cmd = enums.JLinkRTTCommand.GETNUMBUF\n dir = ctypes.c_int(enums.JLinkRTTDirection.UP)\n return self.rtt_control(cmd, dir)", "def test_total_renegotiations(self):\n connection = Connection(Context(SSLv23_METHOD), None)\n assert connection.total_renegotiations() == 0", "def num_bytes(self) -> str:\n return pulumi.get(self, \"num_bytes\")", "def bytes_copied(self) -> float:\n return pulumi.get(self, \"bytes_copied\")", "def compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing TCP ack sizes for\", pcap_filepath)\n nb_acks = {co.C2S: {}, co.S2C: {}}\n acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n try:\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n # Check if linux cooked capture\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n eth = dpkt.sll.SLL(buf)\n else:\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = 
get_ips_and_ports(eth, ip, tcp)\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, saddr, ip, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n finally:\n pcap_file.close()\n\n return nb_acks", "def comptcptotaltxpackets(self) :\n\t\ttry :\n\t\t\treturn self._comptcptotaltxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def comptcptotalrxbytes(self) :\n\t\ttry :\n\t\t\treturn self._comptcptotalrxbytes\n\t\texcept Exception as e:\n\t\t\traise e", "def _bytes_per_record(channel, header):\n num_samples = header['samples_per_record'][channel]\n return num_samples * _RAW_INT_SIZE", "def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})", "def test_size():\n assert Packet106.size == 12", "def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - (ip.hl * 4 + ip.data.off * 4)", "def load(self):\n total = sum(self.connections.values())\n return total", "def message_count(self):\n pass", "def __len__(self):\n # Header + consumer group + len(topics)\n size = self.HEADER_LEN + 2 + len(self.consumer_group) + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition => for each partition\n size += 4 * len(parts)\n return size", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def outstanding(self):\n return self._sent - self._received - self._errors", "def sent_len(self) -> int:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def get_received_frames_count(self, iface):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def __len__(self):\n\n return len(self.read_counter)", "def check_net(self, values):\n try:\n net_io = psutil.net_io_counters()\n 
values[keys.KEY_NETWORK_BYTES_SENT] = net_io.bytes_sent\n values[keys.KEY_NETWORK_BYTES_RECEIVED] = net_io.bytes_recv\n if self.last_net_io is not None:\n values[keys.KEY_NETWORK_BYTES_SENT_PER_SAMPLE] = net_io.bytes_sent - self.last_net_io.bytes_sent\n values[keys.KEY_NETWORK_BYTES_RECEIVED_PER_SAMPLE] = net_io.bytes_recv - self.last_net_io.bytes_recv\n self.last_net_io = net_io\n except:\n logging.error(\"Error collecting network stats.\")", "def totalConnections(analyzer):\n return model.totalConnections(analyzer)", "def _created_connections(self):\n return len(self._available_connections) + len(self._in_use_connections)", "def total_bytes(self,pool=None,status=None):\n\t\tttl = 0\n\t\tfor ele in self.elements:\n\t\t\tif pool and ele.pool not in pool:\n\t\t\t\tcontinue\n\t\t\tif status and ele.status not in status:\n\t\t\t\tcontinue\n\t\t\tttl+=ele.st_size\n\t\treturn ttl", "def SendBufferSize(self) -> int:", "def SendBufferSize(self) -> int:", "def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20", "def snmpqosqos_bytes_tx(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_bytes_tx\n\t\texcept Exception as e:\n\t\t\traise e", "def get_downlink_cnt(self) -> int:\n\n try:\n self._serial.transmit(b'\\x55\\x00')\n response = self._get_reply(0x55, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta", "def _read_amt(self, byte_count):\n full_msg = bytearray()\n while len(full_msg) < byte_count:\n block = self.request.recv(byte_count - len(full_msg))\n full_msg.extend(block)\n return full_msg", "def update(self):\n #*** Get dictionary of NICs with results from psutil:\n os_net = psutil.net_io_counters(pernic=True)\n #*** Update our variables including delta values:\n for interface in os_net:\n #*** Packets in:\n pkts_in = 
os_net[interface].packets_recv\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_pkts_in:\n self.prev_pkts_in[interface] = 0\n if not interface in self.delta_pkts_in:\n self.delta_pkts_in[interface] = 0\n\n #*** Calculate difference in packets in:\n if self.prev_pkts_in[interface]:\n self.delta_pkts_in[interface] = \\\n pkts_in - self.prev_pkts_in[interface]\n else:\n self.delta_pkts_in[interface] = 0\n self.prev_pkts_in[interface] = pkts_in\n\n #*** Packets out:\n pkts_out = os_net[interface].packets_sent\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_pkts_out:\n self.prev_pkts_out[interface] = 0\n if not interface in self.delta_pkts_out:\n self.delta_pkts_out[interface] = 0\n\n #*** Calculate difference in packets out:\n if self.prev_pkts_out[interface]:\n self.delta_pkts_out[interface] = \\\n pkts_out - self.prev_pkts_out[interface]\n else:\n self.delta_pkts_out[interface] = 0\n self.prev_pkts_out[interface] = pkts_out\n\n #*** Bytes in:\n bytes_in = os_net[interface].bytes_recv\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_bytes_in:\n self.prev_bytes_in[interface] = 0\n if not interface in self.delta_bytes_in:\n self.delta_bytes_in[interface] = 0\n\n #*** Calculate difference in bytes in:\n if self.prev_bytes_in[interface]:\n self.delta_bytes_in[interface] = \\\n bytes_in - self.prev_bytes_in[interface]\n else:\n self.delta_bytes_in[interface] = 0\n self.prev_bytes_in[interface] = bytes_in\n\n #*** Bytes out:\n bytes_out = os_net[interface].bytes_sent\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_bytes_out:\n self.prev_bytes_out[interface] = 0\n if not interface in self.delta_bytes_out:\n self.delta_bytes_out[interface] = 0\n\n #*** Calculate difference in bytes out:\n if self.prev_bytes_out[interface]:\n self.delta_bytes_out[interface] = \\\n bytes_out - self.prev_bytes_out[interface]\n else:\n self.delta_bytes_out[interface] = 0\n self.prev_bytes_out[interface] = bytes_out", "def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def payload_size(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_payload_size(self)", "def bytes_total(self):\n return int(self.status[\"pgmap\"][\"bytes_total\"])", "def __len__(self):\n # Header + len(self.consumer_group)\n return self.HEADER_LEN + 2 + len(self.consumer_group)", "def snmpqosqos_bytes_txrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_bytes_txrate\n\t\texcept Exception as e:\n\t\t\traise e", "def compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing MPTCP DSS retransmissions for\", pcap_filepath)\n acks = {}\n conn_acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = 
get_ips_and_ports(eth, ip, tcp)\n\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n pcap_file.close()", "def test_size():\n assert Packet40.size == 2", "def count_bytes(self, deleted=False):\n b = 0\n for _, e in self.contents.items():\n b = b + e.count_bytes(deleted)\n return b", "def total_buffers_count(self) -> int:\n return self._counter", "def payload_length(self):\n return self._payload_length", "def vscp_pythia_transformed_tweak_buf_len(self):\n vscp_pythia_transformed_tweak_buf_len = self._lib.vscp_pythia_transformed_tweak_buf_len\n vscp_pythia_transformed_tweak_buf_len.argtypes = []\n vscp_pythia_transformed_tweak_buf_len.restype = c_size_t\n return vscp_pythia_transformed_tweak_buf_len()", "def update_link_statistics(self):\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.BUFFEROCCUPANCY\n globals.statistics[key][globals.systime] = self.buffersize", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n if conn_acks[conn_id][co.S2C] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission! 
(take into account the seq overflow)\n mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_C2S].add(dss)\n conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n conn_acks[conn_id][co.S2C] = dack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta", "def read_num_lines(data_socket):\r\n size_bytes = b''\r\n for i in range(0, 4):\r\n size_bytes += next_byte(data_socket)\r\n return int.from_bytes(size_bytes, 'big')", "def compute_link_utilization_over_time(link_byte_counts):\n def find_matching_iface_stats(byte_count, source_id, destination_id):\n matching_stats = [d_i for d_i in byte_count\n if d_i[\"sourceSwitchId\"] == source_id and\n d_i[\"destinationSwitchId\"] == destination_id]\n if len(matching_stats) != 1:\n raise ValueError(\"Unexpected results in find_matching_iface_stats. \\\n Found %d matching iface_stats\" % len(matching_stats))\n return matching_stats[0]\n\n def compute_tx_rate(count_in_bytes):\n return (count_in_bytes * 8) / 10.0**7\n\n # First compute the delta between the iface_stats in time_period t_i and the iface_stats\n # in time period t_{i+1}.\n # tx_rate_t: (source_id x destination_id) -> link_utilization_in_time_period_t forall. t\n tx_rate_t = []\n for t_0, t_1 in zip(link_byte_counts, link_byte_counts[1:]):\n byte_count_delta_t = defaultdict(float)\n for iface_stats in t_0:\n source_id = iface_stats[\"sourceSwitchId\"]\n destination_id = iface_stats[\"destinationSwitchId\"]\n t_0_count = iface_stats[\"bytesSent\"] + iface_stats[\"bytesReceived\"]\n try:\n t_1_stats = find_matching_iface_stats(t_1, source_id, destination_id)\n t_1_count = t_1_stats[\"bytesSent\"] + t_1_stats[\"bytesReceived\"]\n except ValueError:\n t_1_count = t_0_count\n\n count_delta = t_1_count - t_0_count\n link_key = compute_link_key(source_id, \n destination_id)\n byte_count_delta_t[link_key] += count_delta\n\n tx_rate_t.append({the_link_key: compute_tx_rate(byte_count_t) \n for the_link_key, byte_count_t in byte_count_delta_t.items()})\n return tx_rate_t", "def _get_lymphocytes_to_exchange(self):\n self.lock_to_exchange.acquire()\n lymphocytes = self.to_exchange[:]\n self.lock_to_exchange.release()\n return lymphocytes", "def get_count(self):\n return unpack(os.read(self.fd, 8))" ]
[ "0.66460896", "0.64858186", "0.6429485", "0.63838947", "0.6298005", "0.6263551", "0.60947376", "0.6089673", "0.6043087", "0.6028377", "0.6015029", "0.5988581", "0.5972827", "0.590944", "0.5890168", "0.5861066", "0.5850881", "0.5805821", "0.5804993", "0.5774528", "0.5753648", "0.5712562", "0.56971496", "0.5684847", "0.5668423", "0.56568676", "0.565415", "0.56442237", "0.5637436", "0.56311524", "0.5624291", "0.5617963", "0.56044745", "0.5596403", "0.5594393", "0.55769056", "0.55760366", "0.5557371", "0.5539818", "0.5528203", "0.5524005", "0.55187833", "0.5515018", "0.54841626", "0.5483857", "0.5478649", "0.54753304", "0.54753304", "0.5460139", "0.5457994", "0.5451745", "0.5443699", "0.5442801", "0.5431177", "0.54277515", "0.54261243", "0.5422372", "0.5413516", "0.54030746", "0.5395716", "0.53918296", "0.53829855", "0.5370648", "0.53435814", "0.5338802", "0.5337609", "0.5336565", "0.5332654", "0.5329928", "0.532883", "0.53240526", "0.5320368", "0.53196687", "0.5314613", "0.53115493", "0.53115493", "0.53011096", "0.52972627", "0.5294824", "0.52929074", "0.5283186", "0.5280917", "0.5276398", "0.5276042", "0.5275542", "0.5273315", "0.52649313", "0.52585745", "0.52584976", "0.52557063", "0.52508754", "0.5249053", "0.52488863", "0.5244886", "0.523826", "0.5236089", "0.5231836", "0.52181304", "0.5214157", "0.52136284" ]
0.64004153
3
Adds an input value to the averaging filter.
def Input(self, value):
    print("Input: {}".format(value))

    # Add the new value to the filter values.
    self.Values.append(value)

    # If appending pushed the filter past its maximum Depth,
    # pop the oldest (first) item so the window keeps its size.
    if len(self.Values) > self.Depth:
        self.Values.pop(0)

    print("Filter ({}): {}".format(len(self.Values), self.Values))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_value(self, value):\n if (value - self.avg) ** 2 > \\\n self.deviation_scale * (self.std + self.deviation_offset):\n BaseFilter.add_value(self, self.avg)\n else:\n BaseFilter.add_value(self, value)", "def add_value(self, value):\n if len(self.hist) < 2:\n BaseFilter.add_value(self, value)\n else:\n filtered_value = self.hist[-1] * self.alpha + value * (1.0 - self.alpha)\n BaseFilter.add_value(self, filtered_value)", "def AvgFilter(self, number):\n\n alpha = (self.k-1)/self.k\n avg = alpha*self.prevAvg + (1-alpha)*number\n\n self.prevAvg = avg\n self.k = self.k + 1\n\n return avg", "def add_to_average(total_count, total_value, new_value):\n return ((1.0 * total_count * total_value) + new_value) / (total_count + 1)", "def averaging(self, value: int):\n self._averaging = value\n\n self.events.averaging()\n self._update_avg()\n\n self.refresh()", "def add(self, value):\n if value < self.ref_value:\n diff = value - self.ref_value\n self._average *= np.exp(self.beta * diff)\n self._average += value**self.order\n self.ref_value = value\n else:\n diff = value - self.ref_value\n self._average += np.exp(-self.beta * diff) * value**self.order\n self.num_samples += 1", "def avg_pooling(self, filter_):\n return self.add_layer(avg_pooling, filter_)", "def moving_average_filter(val, filtered_val_prev, zeta):\n filtered_val = (1-zeta)*filtered_val_prev + zeta*val\n return filtered_val", "def update(\n self,\n value: float,\n weight: Optional[float] = None\n ):\n\n if weight is not None and not self.weighted:\n raise ValueError('Cannot pass a weight to an unweighted averager.')\n\n self.n += 1\n\n if self.has_alpha:\n step_size = self.alpha\n elif self.weighted:\n\n if weight is None:\n raise ValueError('The averager is weighted, so non-None values must be passed for weight.')\n\n self.cumulative_weight += weight\n step_size = weight / self.cumulative_weight\n\n else:\n step_size = 1 / self.n\n\n self.average = self.average + step_size * (value - self.average)", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def add_mean(mean):\n return sum(mean)/len(mean)", "def add_to_average(self, value, decay=1.0, weight=1.0):\n decay = tf.cast(decay, dtype=self.dtype)\n weight = tf.cast(weight, dtype=self.dtype)\n\n update_var = smart_assign(self._var, decay * self._var + weight * value)\n\n update_total_weight = smart_assign(self._total_weight,\n decay * self._total_weight + weight)\n\n return tf.group(update_var, update_total_weight)", "def add(self, term):\n self._value = self.accum_param.addInPlace(self._value, term)", "def average(self, key, value):\n self._average_metrics[key] += value\n self._average_metrics_count[key] += 1", "def average_input_decorator(sampler_function):\n def average_function(sampler):\n average_function.num_calls += 1\n average_function.sum_vector += sampler.parameters.vector\n sampler_params = sampler.parameters.vector\n sampler.parameters.vector = (average_function.sum_vector / \\\n average_function.num_calls)\n\n output = sampler_function(sampler)\n\n if isinstance(output, dict):\n output['variable'] = \"avg_\"+output['variable']\n else:\n for out in output:\n out['variable'] = \"avg_\"+out['variable']\n\n sampler.parameters.vector = sampler_params\n return output\n\n average_function.num_calls = 0\n average_function.sum_vector = 0.0\n return average_function", "def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n steps: int) -> None:\n averaged_param.mul_(1 - self.momentum).add_(\n source_param, alpha=self.momentum)", "def 
__call__(self, new_val: float) -> float:\n self._past_values.append(new_val)\n return self.do_filter()", "def forward_avg(array_in):\n return (array_in[:-1] + array_in[1:]) * 0.5", "def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt", "def update_average(self,result):\n a = 1/self.iters\n b = 1 - a\n self.average = a * result + b * self.average\n self.iters += 1", "def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count", "def moving_avg_filter(data, filter_size=filter_size):\n filter_size = int(filter_size)\n smoothed = np.zeros(len(data))\n for n in range(filter_size, len(data) - filter_size):\n vals = data[n - filter_size : n + filter_size + 1]\n smoothed[n] = np.mean(vals)\n return smoothed", "def conditional_mean(self, F):\n raise NotImplementedError", "def append(self, sample):\n self.samples.append(sample)\n self.total += sample\n while len(self.samples) > self.maxlen:\n self.total -= self.samples.popleft()\n self.mean = float(self.total) / len(self.samples)", "def alteredWeightedAvgAxisPoints(self, var):\n varID = var.id\n var = cdutil.averager(var, axis=\"(%s)\" % self.axis.id,\n weight=self.alteredWeightsVar.filled())\n var.id = varID\n return var", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator that filters to keep only above average data\n above_avg_iter = filter(lambda x: x > avg, data) # returns iterator for data above the avg\n\n print \"values strictly above average are:\", list(above_avg_iter)", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def add_mean_summary(name, value):\n if not value.dtype.is_floating:\n value = tf.cast(value, tf.float32)\n tf.summary.scalar(name, tf.reduce_mean(value))", "def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)", "def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)", "def add_aggregate_temp(self, value: float) -> float:\n # Check if aggregate samples are too old.\n if self.last_sample_time is not None:\n last_sample_time2 = datetime.fromtimestamp(self.last_sample_time)\n now = datetime.now()\n threshold_time = now - timedelta(hours=1)\n if last_sample_time2 < threshold_time:\n # Too old, clear samples.\n self.samples = []\n\n self.samples.append(value)\n self.samples = self.samples[-self.sample_size:]\n agg_value = reduce(\n lambda a, b: a + b,\n self.samples\n ) / len(self.samples)\n self.last_sample_time = datetime.now().timestamp()\n return agg_value", "def average_calc():\n addCount = 0\n addSum = 0\n \n while True:\n addInput = input(\"Please provide a number to add. Quit with 'done' \")\n if addInput == 'done':\n break\n \n addCount += 1\n addSum = addSum + int(addInput)\n addAveradge = addSum / addCount\n \n print(\"Total sum: \", addSum)\n print(\"Average: \", addAveradge)", "def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n steps: int) -> None:\n momentum = 1. 
- self.rampup(self.steps, self.ema_kimg, self.ema_rampup,\n self.batch_size, self.eps)\n if not (0.0 < momentum < 1.0):\n warnings.warn('RampUp momentum must be in range (0.0, 1.0)'\n f'but got {momentum}')\n averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)", "def add_input(self, accumulator, element):\n raise NotImplementedError", "def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val", "def add_filter(self, f):\n raise NotImplementedError", "def add(self, value):\n if not self.isGood:\n return\n\n # Convert value to bucket it lies in.\n if value < self.minX:\n bucket = 0\n elif value >= self.maxX:\n bucket = self.segments - 1\n else:\n bucket = int(self.segments * (value - self.minX) / (self.maxX - self.minX))\n self.counts[bucket] += 1\n\n self.lineplot.set_data(self.xs, self.counts)\n self.ax.relim()\n self.ax.autoscale_view() # rescale the y-axis", "def incavg(val = None):\n\n cnt = 0\n avg = 0\n \n if not val is None:\n cnt = 1\n avg = val\n\n while True:\n val = (yield avg)\n\n if val is None:\n pass # next was called\n elif cnt == 0: # first value\n cnt = 1\n avg = val\n else:\n cnt += 1\n avg = avg + (val - avg) / float(cnt)", "def averaging_factor(self):\n af = ct.c_uint()\n self.lib.Filter_GetAveragingFactor(ct.pointer(af))\n return af.value", "def modelmean(self, model_params, this_data, this_suff_stat):\n pass", "def AddSample(self, machine, timestamp, value):\n self.machine_data.setdefault(machine, list()).append([timestamp, value])\n if len(self.cluster_total) == 0 or timestamp > self.cluster_total[-1][0]:\n self.cluster_total.append([timestamp, 0])\n self.cluster_avg.append([timestamp, 0])\n self.cluster_total[-1][1] += value\n self.cluster_avg[-1][1] = self.cluster_total[-1][1] / float(len(self.machine_data))", "def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return", "def mean(vals):", "def set_average(self, avg, num_samples):\n self._average = avg * num_samples\n self.num_samples = num_samples", "def Output(self):\n filter_sum = 0\n # Calculate the sum of all filter values.\n for s in self.Values:\n filter_sum += s\n print(\"Sum: {}\".format(filter_sum))\n # The average is the sum divided by the current amount of\n # samples.\n avg = filter_sum / len(self.Values)\n print(\"Average: {}\".format(avg))\n\n if self.Round is True:\n # Round the average to the nearest integer.\n avg = round(avg)\n\n print(\"Output: {}\".format(avg))\n return avg", "def add_filter(self, name, value, comparator='equals',\n case_sensitive=False):\n self.filters.append({'name': name, 'value': value,\n 'comparator': comparator,\n 'case_sensitive': case_sensitive,\n 'type': 'filter'})", "def movavg(ave_list, length, value):\n ave_list.append(value)\n if length < len(ave_list):\n del ave_list[0]\n value = sum(ave_list)\n return value / len(ave_list)", "def add_value_mean(cls, sensor, values, device_id):\n if values[device_id] is not None:\n if sensor == 't':\n cls.mean_t.append(int(values[device_id][sensor]))\n if sensor == 'l':\n cls.mean_l.append(int(values[device_id][sensor]))", "def append_value(self, value):\n self.value += value", "def avg(a,b):\r\n return (a+b)/2", "def 
__iadd__(self,value):\n if isinstance(value,LiveStat):\n raise Exception(\"Cannot sum statistics\")\n if value.vcount < 1 or self.vcount < 1:\n raise Exception(\"Cannot sum empty statistics\")\n else:\n # sum of two considered pairwise: z_i = stat(x_i + y_i)\n #\n # data have different weights due to number of samples.. TODO\n self.vmin += value.vmin \n self.vmax += value.vmax\n self.vmean += value.vmean\n self.vsum += value.vsum\n # variance is sum of variance?\n self.vm2 += value.vm2\n # TODO vm3 vm4\n self.vcount = min(value.vcount,self.vcount)\n self.vcountsq = self.vcount**2\n self.dirty = True\n print (\"add Missing: M3 and M4\")\n else:\n # constant bias\n if self.vmin is not None:\n self.vmin += value\n self.vmax += value\n self.vmean += value\n self.vsum += self.vcount*value\n print (\"add Missing: M3 and M4\")\n self.dirty = True\n return self", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def y(self, val):\n if len(self.buf) < self.buf_n:\n self.buf.append(val)\n else:\n self.buf[self.buf_i] = val\n self.buf_i += 1\n self.avg = sum(self.buf) / len(self.buf)\n if self.buf_i == self.buf_n:\n self.buf_i = self.buf_i % self.buf_n \n if self.cas != None: \n self.cas.y(self.avg)\n self.dump() \n return self.avg", "def add_to_mean(current_mean, n, new_value, decimals=2):\n\n old_sum = current_mean * n\n\n # sanity check the provided n\n if n <= 0 or not isinstance(n, int):\n raise ValueError('Current n must be an integer greater than 0.')\n\n if isinstance(new_value, (int, float)):\n new_sum = old_sum + new_value\n\n return(round(new_sum / (n + 1), decimals))\n\n\n elif type(new_value) in [list, tuple]:\n new_sum = old_sum + sum(new_value)\n\n return(round(new_sum / (n + len(new_value)), decimals))\n\n else:\n raise TypeError('add_to_mean() requires the new value(s) to be an int, '\n 'float, or a list or tuple of ints and/or floats.')", "def add(self, value: float) -> None:\n self.rawValue = self.momentum * self.rawValue + (1 - self.momentum) * value\n self.i += 1", "def add(self, value):\n if self.squared:\n if isinstance(value, list):\n value = [np.square(x) for x in value]\n else:\n value = np.square(value)\n\n if isinstance(value, list):\n for i in range(0, len(value)):\n self.value[i] = np.multiply(self.decay, self.value[i]) + np.multiply((1. - self.decay), value[i])\n else:\n self.value = np.multiply(self.decay, self.value) + np.multiply((1. 
- self.decay), value)", "def iadd_scalar(self, other: float):\n self.set(self.gross + other)", "def add_to_score(self, to_add):\n self.score += to_add", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def Av(self, value):\n\n if value < 0.0:\n raise InputParameterError(\"parameter Av must be positive\")", "def make_averaged(fn, num_samples=1000):\n # BEGIN PROBLEM 8\n def average(*args):\n sum = 0\n for i in range(num_samples):\n sum += fn(*args)\n return sum / num_samples\n return average\n # END PROBLEM 8", "def avgAxisPoints(self, var):\n varID = var.id\n var = cdutil.averager(var, axis=\"(%s)\" % self.axis.id, weight='equal')\n var.id = varID\n return var", "def conditional_mean(self, gp):\n raise NotImplementedError", "def weightedAvgAxisPoints(self, var):\n varID = var.id\n var = cdutil.averager(var, axis=\"(%s)\" % self.axis.id)\n var.id = varID\n return var", "def make_averaged(fn, num_samples=1000):\n # BEGIN PROBLEM 7\n def average_function(*args):\n counter = 0\n result = 0\n while(counter<num_samples):\n result_holder = fn(*args)\n result+= result_holder\n counter+=1\n return result/num_samples\n return average_function\n # END PROBLEM 7", "def getMean(self, windowSize=0):\r\n try:\r\n if self._data.size == 0:\r\n raise RuntimeError(\"Filter1D data is empty. Call Filter1D.addDataPoint() to add data prior calling Filter1D.getMean().\")\r\n if type(windowSize) is int:\r\n if windowSize <= 0 or windowSize > self._maxSize:\r\n windowSize = self._maxSize\r\n return np.mean(self._data[-windowSize:])\r\n else:\r\n raise TypeError(\"windowSize must be an integer\")\r\n except TypeError or RuntimeError:\r\n raise", "def add_input(self, input_value, log_level):\n\n self.input_parameters.append(input_value)\n if log_level >= 1:\n print(f\"Amplifier[{self.name}]: Input parameters: {self.input_parameters},\" +\n f\" input position: {self.input_position}\")", "def average(self, n=0):\n assert n >= 0\n for key in self.value_history:\n values = np.array(self.value_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n avg = np.sum(values * nums) / np.sum(nums)\n self.output[key] = avg", "def average(data):\n return np.average(data)", "def __add__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] += intensity\n else:\n output[wavelength] = intensity\n return output", "def _mean(listvalue):\n\treturn sum(listvalue)/len(listvalue)", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def add_Mag_Value(self, input):\n self.magflux = input", "def _update_accumulation(self, index, grad):\n self.accumulation[index] = self.accumulation[index] + grad**2", "def main():\n\n sum_of_values = 0.0\n count = 0\n\n x = input('Enter the next number in list to average. (\"q\" to exit) ')\n while 'q' not in x.lower():\n print ('x =', x)\n sum_of_values = sum_of_values + float(x)\n count += 1\n x = input('Enter number in list to avg. 
(\"q\" to exit) ')\n\n print(\"The average of {0} values is {1}\".format(\n count,\n sum_of_values / count))", "def make_averaged(fn, num_samples=1000):\n # BEGIN PROBLEM 8\n \"*** YOUR CODE HERE ***\"\n def return_average(*args):\n k, total = 0, 0\n while k < num_samples:\n total += fn(*args)\n k += 1\n return total / num_samples\n return return_average\n # END PROBLEM 8", "def append(self, avg, timestamp=None):\n self.data.append(avg)\n # add timestamp every second\n if not self._size % int(self.freq * self._seconds):\n if not timestamp:\n timestamp = time.time()\n self._timelist.append(timestamp)\n self._size += 1", "def ResetAvgFilter(self):\n self.k = 1\n self.prevAvg = 0", "def average(data, number=None):\n if number is None:\n return numpy.mean(data)\n return numpy.sum(data) / number", "def Mean_Filter(audio,M):\t\t\t\t\t\t\t\t\t\t\t\t# Function to apply Mean Filter to audio signal\n\tp,q,s = M,audio.shape[0]- M,audio.shape[0]\n\taudio_change = np.zeros(s+2*M)\n\taudio_change[M:s+M] = audio\n\taudio_new = np.zeros(s)\n\t\t\n\tfor i in range(M,s+M):\n\t\taudio_new[i-M] = np.mean(audio_change[i-M:i+M])\n\t\n\ttime = np.arange(s)\t\n\t\n\treturn audio_new,time", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def forward(self, input_x):\n adv, val = self.adv_val(input_x)\n return val + (adv - adv.mean(dim=1, keepdim=True))", "def forward(self, input_x):\n adv, val = self.adv_val(input_x)\n return val + (adv - adv.mean(dim=1, keepdim=True))", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def update(self, value):\n if value < self.min:\n self.min = value\n if value > self.max:\n self.max = value\n self.total += value\n self.instances += 1\n self.values.append(value)", "def avg(x, y):\n return (x + y)/2", "def mean(self, mean):\n\n self._mean = mean", "def findMean (*args):\r\n total = my_module.addStuff(*args)\r\n return total/len(args)", "def with_sum_mean_reduction(self):\n return self.with_reduction(lambda x: x.sum(1).mean(0))", "def my_mean(x):\n return my_sum(x) / my_len(x)", "def average(self):\n return self.summation() / self.count()", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def set_input(self, input_value):\r\n self.sample += 1\r\n self.drift_found = self.drift_detector.set_input(input_value)\r\n if self.drift_found:\r\n self.timestamp += 1\r\n if self.buffer.is_full:\r\n result_buffer = self.buffer.add(self.timestamp)\r\n self.reservoir.add_element(result_buffer)\r\n else:\r\n self.buffer.add(self.timestamp)\r\n interval = self.timestamp\r\n self.recent_interval[self.rolling_index] = interval\r\n self.rolling_index += 1\r\n if self.rolling_index == self.reservoir.size * 2:\r\n self.rolling_index = 0\r\n self.timestamp = 0\r\n self.pre_drift_point = self.sample\r\n if self.buffer.is_full and self.reservoir.check_full():\r\n relative_var = self.buffer.get_stddev() / self.reservoir.get_stddev()\r\n if relative_var > (1.0 + self.confidence) or relative_var < (1.0 - self.confidence):\r\n self.buffer.clear()\r\n # self.severity_buffer[:] = []\r\n self.vol_drift_found = True\r\n else:\r\n self.vol_drift_found = False\r\n else:\r\n self.timestamp += 1\r\n self.vol_drift_found = False\r\n\r\n return self.vol_drift_found", "def do(self, x, typ):\n try:\n mean,stddev = self.mean_stddev()\n except StdDevFilterException:\n self.insert_value(x)\n return x,True\n\n self.insert_value(x)\n\n 
# limit dispersion, refuse new value when too far away from mean\n e = abs(x-mean)\n if e <= self.alpha*stddev:\n return x,True\n else:\n return mean,False", "def set_value(self, val):\n for i, v in enumerate(val):\n if v < self.allowable_min[i]:\n raise ValueError(\"{0}, {1} less than min value {2}, index {3}\".format(self.get_name(), val, self.min_found, i))\n if v > self.allowable_max[i]:\n raise ValueError(\"{0}, {1} greater than max value {2}, index {3}\".format(self.get_name(), val, self.max_found, i))\n\n self.min_found[i] = min(self.min_found[i], v)\n self.max_found[i] = max(self.max_found[i], v)\n\n n = self.count+1\n self.avg_found[i] = self.avg_found[i] * (self.count / n) + v * (1.0 / n)\n\n self.count += 1\n self.value = val", "def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_mean(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._mean if not avg else self._mean_avg", "def mean(self, name, **kwargs):\n return np.mean(self.get(name,**kwargs))", "def average(arg1, *args): \n return (arg1 + sum(args)) / (1 + len(args))", "def average(self):\n return (self.current + self.last) / 2.0" ]
[ "0.74540794", "0.71780056", "0.6814798", "0.6716383", "0.6649134", "0.63110775", "0.6276187", "0.6150249", "0.6108856", "0.6071165", "0.60466886", "0.60204285", "0.6016385", "0.5957288", "0.5926967", "0.59223187", "0.5921518", "0.591883", "0.5895319", "0.57737994", "0.57539403", "0.5738388", "0.5669046", "0.5665694", "0.56563276", "0.56504476", "0.55806047", "0.55727893", "0.55699706", "0.556534", "0.556534", "0.5548686", "0.554235", "0.5541945", "0.5538315", "0.5526809", "0.5518838", "0.55080247", "0.5499027", "0.549821", "0.5496281", "0.5492284", "0.54740304", "0.5458252", "0.54520017", "0.54517365", "0.54396105", "0.54381627", "0.5426502", "0.5403173", "0.5394958", "0.5389248", "0.5371816", "0.53712827", "0.5353473", "0.5349121", "0.53429574", "0.53308684", "0.5329265", "0.5327938", "0.53213274", "0.52986205", "0.52981454", "0.52954316", "0.52925014", "0.5290837", "0.52770525", "0.5259892", "0.52561796", "0.5252883", "0.5251832", "0.52485055", "0.52477956", "0.5239223", "0.5233084", "0.52265066", "0.52249694", "0.52237904", "0.52220166", "0.52193016", "0.5218176", "0.5214793", "0.5203511", "0.5203511", "0.52018857", "0.51958597", "0.51879066", "0.5178951", "0.51781344", "0.51695776", "0.51553607", "0.5147389", "0.51472074", "0.5142966", "0.5138113", "0.51354784", "0.51346964", "0.51281685", "0.51280355", "0.51264495" ]
0.6106456
9
Calculates the average of the current filter values.
def Output(self):
    # Calculate the sum of all filter values.
    filter_sum = sum(self.Values)
    print("Sum: {}".format(filter_sum))

    # The average is the sum divided by the current number of samples.
    avg = filter_sum / len(self.Values)
    print("Average: {}".format(avg))

    if self.Round:
        # Round the average to the nearest integer.
        avg = round(avg)

    print("Output: {}".format(avg))
    return avg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average(self):\n return self.summation() / self.count()", "def average(self):\n return (self.current + self.last) / 2.0", "def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def averaging_factor(self):\n af = ct.c_uint()\n self.lib.Filter_GetAveragingFactor(ct.pointer(af))\n return af.value", "def calculateAverage(self): \n if not self.lastTransferAverage: \n size=[0,0,0,0]\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n size[i]=self.lastNbrSamplesPerSeg\n self.lastAverageArray = [zeros(size[0]),zeros(size[1]),zeros(size[2]),zeros(size[3])]\n nbrSamp=self.lastNbrSamplesPerSeg\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n nbrSeg=self.lastNbrSegmentsArray[i]\n for j in range (0,nbrSamp):\n for k in range(0,nbrSeg): \n self.lastAverageArray[i][j]+=self.lastWaveformArray[i][k*nbrSamp+j]\n self.lastAverageArray[i][j]/=nbrSeg\n self.lastAverageCalculated=True\n else: print \"NOn averaged data are not available\"", "def AvgFilter(self, number):\n\n alpha = (self.k-1)/self.k\n avg = alpha*self.prevAvg + (1-alpha)*number\n\n self.prevAvg = avg\n self.k = self.k + 1\n\n return avg", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def average(self):\n if self._average is None: # only first time\n self._average = self._obj.mean(dim='t')\n self._average.attrs = self._obj.attrs # we need units in quiver\n\n return self._average", "def average(values):\n\treturn sum(values)/len(values)", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3", "def average(values):\n return sum(values) / len(values)", "def average(values):\n return sum(values) / len(values)", "def avg(values):\n return sum(values) / float(len(values))", "def calcAverage(dat):\n return sum(dat)/len(dat)", "def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / float(self._cur_elem_count)\n self._mtx.release()\n return avg", "def mean(self):\n return self.sum / self.sum_weights", "def average(data):\n return np.average(data)", "def average(self):\n return np.mean(self.buf[:self._size], axis=0)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def average(self):\n if self._average is None:\n self._average = sum([df.df for df in self])/len(self)\n return self._average", "def average_rating(self):\n ratings = AttractionRating.objects.filter(attraction=self)\n total_rating = 0\n for rating in ratings:\n total_rating += rating.rating\n\n # If there are no rating, then we set the average to 0\n # otherwise we calculate the average\n try:\n avg = total_rating / len(ratings)\n except ZeroDivisionError:\n avg = total_rating\n\n return avg", "def average(self):\n return self.properties.get('average')", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / 
len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. / self.pair.data['avg']", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def avg_pooling(self, filter_):\n return self.add_layer(avg_pooling, filter_)", "def calculateAverage(self, data):\n\n nValidTrials = data['nValid'][-1]\n nRewardTrials = data['nRewarded'][-1]\n return float(nRewardTrials)/nValidTrials", "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def ave(values):\n return float(sum(values))/len(values)", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu", "def average_weights(self):\n for feat, weight in self.weights.items():\n total = self._totals[feat]\n total += (self.i - self._tstamps[feat]) * weight\n averaged = total / float(self.i)\n self.weights[feat] = averaged\n return None", "def _update_avg(self):\n if self._data_type == 'coords':\n # default averaging is supported only for 'matrix' dataTypes\n return\n elif self._data_type == 'image':\n\n x, y = self._averaging, self._averaging\n\n if (x,y) == (1, 1):\n self.vectors = self._original_data\n # calling original data\n return\n\n tempdat = self._original_data\n range_x = tempdat.shape[0]\n range_y = tempdat.shape[1]\n x_offset = int((x - 1) / 2)\n y_offset = int((y - 1) / 2)\n\n kernel = np.ones(shape=(x, y)) / (x*y)\n\n output_mat = np.zeros_like(tempdat)\n output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel,\n mode='same', boundary='wrap')\n output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel,\n mode='same', boundary='wrap')\n\n output_mat[:, :, 0] = output_mat_x\n output_mat[:, :, 1] = output_mat_y\n\n self.vectors = (output_mat[x_offset:range_x-x_offset:x,\n y_offset:range_y-y_offset:y])", "def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_mean(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._mean if not avg else self._mean_avg", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3 + self.rating_4 + self.rating_5 + self.rating_6 + self.rating_7) / 7", "def mean(self):\n return self.vmean", "def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def avgX(self):\n return np.mean(self.getx())", "def _get_mean(self, sums, step):\n\n return sums/step", "def get_average(data):\n average = sum(data) / len(data)\n\n return average", "def averaging(self, value: int):\n self._averaging = value\n\n self.events.averaging()\n self._update_avg()\n\n self.refresh()", "def _mean(items):\n return sum(items) / len(items)", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += 
rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0", "def CalculateListAverage(values):\n if not values:\n return 0\n return sum(values) / float(len(values))", "def average(old_rating, new_rating, count):\n return float(round(Decimal((old_rating * count + new_rating) / (count + 1)), 1))", "def moving_avg_filter(data, filter_size=filter_size):\n filter_size = int(filter_size)\n smoothed = np.zeros(len(data))\n for n in range(filter_size, len(data) - filter_size):\n vals = data[n - filter_size : n + filter_size + 1]\n smoothed[n] = np.mean(vals)\n return smoothed", "def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()", "def add_to_average(total_count, total_value, new_value):\n return ((1.0 * total_count * total_value) + new_value) / (total_count + 1)", "def mean(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n weighted_sum = sum(key * value for key, value in clean.items())\n return weighted_sum / total", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def moving_average_filter(val, filtered_val_prev, zeta):\n filtered_val = (1-zeta)*filtered_val_prev + zeta*val\n return filtered_val", "def load_average(self):\n return _favg(self.load_samples)", "def mean(self):\n return self.aggregate(np.mean)", "def get_value(\n self\n ) -> float:\n\n return self.average", "def mean(vals):", "def mean(self):\n return self.cond_proba.mean", "def _image_average(self, images):\n image_data = [\n image.normalize().data for image in images\n # Workaround: skip partial volcano images at the edges\n if image.data.shape[0] == image.data.shape[1]\n ]\n return np.rint(\n np.mean(image_data, axis=0)\n ).astype(np.uint8)", "def calc_average():\r\n total = 0\r\n count = 0\r\n for i in records:\r\n total+=int(i[i.find(',')+1:])\r\n count+=1\r\n average = total/count\r\n return average", "def calculate_average(array):\n result = 0\n for item in array:\n result += float(item)\n final_result = result/len(array)\n return final_result", "def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()", "def avg(self):\n if not self.committed_together:\n return 0\n\n return round(statistics.mean(self.committed_together))", "def mean(self, weight_by_area=True):\n if weight_by_area:\n return self.integral() / self.indicator.integral()\n else:\n return self.sum() / self.indicator.sum()", "def mean(self):\r\n return np.mean(self.data_array)", "def should_average(self):\n return self._should_average", "def get_average(array):\n total = sum(array)\n count = len(array)\n average = total / count\n return average", "def average_ps(self):\n\n self.powerspectrum=np.average(self.powerspectra, axis=0)", "def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)", "def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)", "def forward_avg(array_in):\n return (array_in[:-1] + 
array_in[1:]) * 0.5", "def mean(self):\n return sum(p * x for x, p in self.items())", "def mean(values):\r\n return sum(values) / float(len(values))", "def mean(self):\n mean = sum(self.data)/self.size\n return mean", "def avg(a,b):\r\n return (a+b)/2", "def avg(arr):\n return sum(arr) / float(len(arr))", "def get_mean(self):\n return self.serie.mean()", "def average(self, returns):\r\n return returns.mean() * self.day", "def aver_and_var(self):\n # assert not self.is_empty\n\n for axis in range(3):\n c1, c2 = self.bounds[axis]\n w = self.n_pix_partial[axis]\n aver = np.average(np.arange(c1, c2), weights=w)\n var = np.average(np.arange(c1, c2)**2, weights=w) - aver ** 2 # D = E(X^2) - (EX)^2\n yield aver, var", "def _mask_and_avg(values, padding_mask):\n\tdec_lens = torch.sum(padding_mask,dim=1)\n\tlosses = torch.stack(values, dim=1)\n\tlosses = losses * padding_mask\n\tvalues_per_ex = torch.sum(losses, dim=1)/dec_lens\n\treturn torch.sum(values_per_ex)", "def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)", "def average_ratings(self):\n return get_average_rate(\n model=Rating,\n article=self.pk\n )", "def ram_average(self):\n return _favg(self.ram_samples)", "def mean(self) -> float:\n return self._data.mean()", "def mean(self) -> typing.Tuple[float, float]:\r\n self.clean_window()\r\n return (\r\n (self.sum_frames_rec / self.window_size),\r\n (self.sum_frames_proc / self.window_size)\r\n )", "def _mask_and_avg(values, padding_mask):\n\n dec_lens = tf.reduce_sum(padding_mask, axis=1) # shape batch_size. float32\n values_per_step = [v * padding_mask[:,dec_step] for dec_step,v in enumerate(values)]\n values_per_ex = sum(values_per_step)/dec_lens # shape (batch_size); normalized value for each batch member\n return tf.reduce_mean(values_per_ex) # overall average", "def _calc_mean(self, f, a, b, z):\n\n c_1 = (t.exp(a[...,0] * f[...,0] + b[...,0]) *\n (a[...,0] * f[...,0] - 1)\n ) / a[...,0]**2\n c_2 = ((t.exp(a[...,1:-1] * f[...,1:] + b[...,1:-1]) * (a[...,1:-1] * f[...,1:] - 1) -\n t.exp(a[...,1:-1] * f[...,:-1] + b[...,1:-1]) * (a[...,1:-1] * f[...,:-1] - 1)\n ) / a[...,1:-1]**2).sum(-1)\n c_3 = (t.exp(a[...,-1] * f[...,-1] + b[...,-1]) *\n (a[...,-1] * f[...,-1] - 1)\n ) / a[...,-1]**2\n\n return 1/z * (c_1 + c_2 - c_3)", "def fmean(items):\n if len(items) == 0:\n return 0.\n\n return fsum(items) / float(len(items))", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def average(data):\r\n sum =0\r\n for i in data:\r\n sum+=i\r\n return sum/len(data)", "def mean_value( values ):\n return sum( values ) / len( values )", "def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)", "def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None" ]
[ "0.75439095", "0.73029804", "0.7202501", "0.7086935", "0.7005572", "0.6967038", "0.6947147", "0.6872095", "0.68588364", "0.6849239", "0.6839921", "0.6821161", "0.6814509", "0.6814509", "0.6768326", "0.6744722", "0.6728115", "0.67118907", "0.6687017", "0.6667109", "0.66239166", "0.66239166", "0.66239166", "0.6623521", "0.66126484", "0.66093016", "0.6601945", "0.6601816", "0.65997225", "0.65863407", "0.6572925", "0.65727293", "0.65723544", "0.6559942", "0.65559775", "0.6552718", "0.65458965", "0.6539155", "0.6538438", "0.65290755", "0.6517511", "0.65164393", "0.6513403", "0.6509033", "0.65089816", "0.6506753", "0.6499666", "0.64967686", "0.6482991", "0.64741886", "0.6472848", "0.6465913", "0.6464481", "0.6429", "0.6424315", "0.6418251", "0.64181316", "0.6398562", "0.6389199", "0.6378304", "0.6360915", "0.6358448", "0.6332067", "0.63290864", "0.63280785", "0.63278604", "0.63278127", "0.6303025", "0.6302971", "0.63026375", "0.63007885", "0.6291542", "0.628932", "0.6287065", "0.6285254", "0.62781733", "0.62781733", "0.62779796", "0.6277939", "0.6262092", "0.62574905", "0.62571216", "0.62466836", "0.6240832", "0.623737", "0.62320375", "0.62318015", "0.6231243", "0.6222909", "0.62173784", "0.6210558", "0.6208459", "0.62065005", "0.6198537", "0.61983895", "0.61961526", "0.61923254", "0.6191409", "0.61913174", "0.61854523" ]
0.7149273
3
Resets the filter values.
def Reset(self):
    # Popping at a rising index from a shrinking list skips items and
    # raises IndexError; clearing the list in place resets the filter.
    del self.Values[:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def do_reset(self, args):\n\t\tself.parent.filter = {}\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def reset_values(self):\n\n self.values = []", "def ResetAvgFilter(self):\n self.k = 1\n self.prevAvg = 0", "def reset(self):\n self.values.clear()\n\n self.on_reset()", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n self.retrain()", "def reset_params(self):\n self.blur = -1\n self.closing = -1\n self.thresh = -1", "def reset_values(self):\n\n self.values = np.array([])", "def _reset(self):\n self._values = {}", "def reset(self):\n self.fuzz_complete = False\n self.mutant_index = 0\n self.value = self.original_value", "def reset_instances_filter(self):\n page_instances = self.page_instances()\n page_instances.field_filter_instances.value = ''\n page_instances.button_filter_instances.click()", "def reset(self):\n self.params.resetParams()", "def reset(self):\n self.sample['masked'] = [False]*len(self.sample.index)\n self.sample['colour'] = ['undefined']*len(self.sample.index)", "def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):\n if np.any(self.val != self.valinit):\n self.set_val(self.valinit)", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def highpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._highpass_sos)\n print('Zi shape: ', zi.shape, data.shape)\n self._highpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the high-pass filter state.')", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def reset(self) -> None:\n self.val = None\n self.notes = []\n self.blocked = False\n self.forbidden = False", "def reset_parameters(self):\n self.apply(ixvr)", "def reset_parameters(self):\n self.apply(ixvr)", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = 
[]", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def reset(self):\n self.data = {}\n self.pf.reset()\n\n self.tc.reset()\n # Reset the neuron grid\n (self.n_n, XE, YE, IE, _, _) = self.init_pix_rf_centers(\n self.l_n, self.l_i, self.ds, self.de, mode=self.neuron_layout,\n drop_prob=self.drop_prob\n )\n self.tc.t_XE.set_value(XE)\n self.tc.t_YE.set_value(YE)\n self.tc.t_IE.set_value(IE)\n self.pf = self.init_particle_filter(self.motion_prior, self.n_p)", "def reset(self):\n self.temp_data.clear()", "def reset(self):\n self.data = {}\n self.is_bound = False\n self._errors = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset_reservoir(self):\n self.state = np.zeros((self.state_size,1),dtype=self.typefloat)", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)", "def reset(self):\n\n self.results = []\n self._plot()", "def reset(self):\n self._weights.clear()", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def _reset(self):\n self._value = self._default", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def lowpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._lowpass_sos)\n self._lowpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the low-pass filter state.')", "def reset_state(self):\n self.s = np.copy(self.s_i)", "def reset(self, **kwargs):\n if self.original_resources is not None:\n self.resources = self.original_resources\n else:\n coverage_filter = np.zeros((self.region, self.region))\n coverage_filter[np.random.uniform(0, 1, (self.region, self.region)) < self.coverage] = 1.\n self.resources = np.multiply(\n np.random.uniform(self.min_value, self.max_value, (self.region, self.region)),\n coverage_filter\n )", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def reset(self):\r\n self.state = copy.copy(self.mu)", "def reset(self):\n self._data = []", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)", "def Reset(self):\n self._results = []", "def reset(self):\n self.value = None", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n 
self.tau_values = []", "def reset(self):\n self.data = self._defaults", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def reset(self):\r\n self.buffer = np.zeros(self.nBins)\r\n self.counter = 0", "def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])", "def reset(self):\n if self.allowable_max < self.allowable_min:\n raise ValueError(\"{0} max less than min\".format(self))\n self.min_found = self.allowable_max * 1e6\n self.max_found = self.allowable_min * 1e-6\n self.avg_found = self.allowable_min * 0.0\n self.count = 0\n self.value = None", "def reset(self) -> None:\n self._vector = self._original_vector.copy()", "def reset() -> None:\n Parameter.by_name = {}", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def reset(self):\n self.final_result = Milter.ACCEPT\n self.new_headers = []", "def reset(self):\n \n # start with all zeros\n self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)\n\n return self.observation(self.env.reset())", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self):\n self.tot = 0\n self.cnt = [0.0 for _ in range( self.alpha.getLen() )]", "def reset(self):\n \n pass", "def reset(self):\n self.value = self.min_value", "def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1", "def reset(self, **kwargs):\n coverage_filter = np.zeros((self.region, self.region))\n coverage_filter[np.random.uniform(0, 1, (self.region, self.region)) < self.coverage] = 1.\n self.resources = np.multiply(\n np.random.uniform(self.min_value, self.max_value, (self.region, self.region)),\n coverage_filter\n )", "def reset(self):\n self._idx = 0", "def reset(self) -> None:\n self.f1.reset()", "def reset(self):\n self.c_count = 0\n self.a_count = -1\n self.epsilon = self.init_epsilon", "def reset(self):\n self._previous_v = 0\n self._previous_m = 0\n self._previous_shape = 0", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def reset(self):\n self.observation = None\n self.history.clear()\n for i in range(len(self.answers)):\n self.answers[i] = None\n self.reset_metrics()", "def reset(self):\n self.items = np.arange(self.ratings.shape[1])", "def reset(self):\n self._hist.reset()\n return self", "def reset(self):\n # type: () -> None\n self._all_scalar_oids = []\n self._use_scalar_oids_cache = False", "def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")" ]
[ "0.7977834", "0.7429131", "0.74247444", "0.7300497", "0.7222309", "0.72031015", "0.7117929", "0.7102414", "0.7066342", "0.70600677", "0.6972775", "0.68997204", "0.68905616", "0.68335426", "0.6815627", "0.6741218", "0.66964805", "0.66880286", "0.66741985", "0.66408324", "0.6621593", "0.66203445", "0.65867704", "0.6579275", "0.6564767", "0.6562191", "0.65579915", "0.65389013", "0.6534392", "0.6534392", "0.6534091", "0.6517773", "0.6511191", "0.6509498", "0.6508387", "0.65040416", "0.64888734", "0.64864266", "0.64864266", "0.64862555", "0.64760786", "0.6458746", "0.645328", "0.64527684", "0.6443988", "0.6442775", "0.6442775", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64400023", "0.64321685", "0.64221734", "0.6420217", "0.64153", "0.6412687", "0.6411224", "0.64025396", "0.6364966", "0.63626665", "0.6353531", "0.6352526", "0.6341983", "0.6341983", "0.6341983", "0.6341983", "0.6340431", "0.6330734", "0.6313267", "0.6308201", "0.630681", "0.6304132", "0.6299772", "0.6294226", "0.62802464", "0.62754744", "0.62754744", "0.62754744", "0.62742615", "0.62617624", "0.62570083", "0.62435037", "0.6233642", "0.62334454", "0.6230957", "0.62264913", "0.6225351", "0.621338", "0.6208832", "0.62068033", "0.6206749", "0.6204399", "0.6201092" ]
0.64528835
43
return the current schema_org schema version
def get_schema_org_version(): return _get_schemaorg_version()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_schemaorg_version():\n try:\n version = get_latest_schemaorg_version()\n except ValueError:\n version = SCHEMAORG_DEFAULT_VERSION\n return version", "def schema_version(self):\n # return self._parsed[\"schemaVersion\"]\n # does not exist in manifest reference\n pass", "def schema_version(self):\n return self._parsed[\"schemaVersion\"]", "def schema_version(self) -> str:\n return self._pipeline_definition.get(\"version\")", "def get_datasetSchemaVersion(self):\n\t\treturn self.dsDoc['about']['datasetSchemaVersion']", "def get_problemSchemaVersion(self):\n\t\treturn self.prDoc['about']['problemSchemaVersion']", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def schema_version(conn):\n with Tx(conn) as c:\n try:\n c.execute('SELECT version FROM meta LIMIT 1', ['version'])\n except psycopg2.ProgrammingError:\n return 0\n if c.rowcount == 0:\n return 0\n return c.fetchone()['version']", "def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema_transformation_version\")", "def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]", "def db_version():\n return IMPL.db_version()", "def get_version(self):\n return 0", "def version(self):\n return self.get_current_version()", "def version(self):\r\n print migration.db_version()", "def version(self):\n if \"version\" in self._prop_dict:\n return self._prop_dict[\"version\"]\n else:\n return None", "def version(self):\n return self._get(\"version\")", "def get_version(self):\n pass", "def __get_db_version_int(self):\r\n query = QtSql.QSqlQuery('PRAGMA user_version')\r\n query.first()\r\n return query.value(0).toInt()[0]", "def get_version(self):\n return self.version", "def getversion(self):\n return self.__version", "def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA", "def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")", "def version(self):\n if not self._version:\n self._version = self._get_version()\n\n return self._version", "def model_version(self) -> str:\n return pulumi.get(self, \"model_version\")", "def get_version():\n global __model\n return __model.__version__", "def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)", "def version(self):\n if not hasattr(self, \"_version_string\"):\n return None\n return semantic_version.Version(self._version_string)", "def get_version(self):\n return self._version", "def get_version(self):\n return self._version", "def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10", "def get_version(self):\n return version.__version__", "def get_version(self):\n return version.__version__", "def get_version(self):\n return self.cur_config['version']['name']", "def getPackageVersion(self):\n return _libsbml.QualPkgNamespaces_getPackageVersion(self)", "def get_version():\n return 1", "def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")", "def version(self):\n return self.__version", "def 
version(self):\n return self.__version", "def version(self):\n self._get_latest_content()\n return self._data.get('version', None)", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n return self._version", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get(self):\n return self._version", "def version(self) -> str:\n return self.w3.version.node # type of connected node", "def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")", "def get_schema(self):\r\n return self.__schema", "def _get_version(self):", "def version(self):\n return 1", "def version(self):\n\n return self._version", "def db_version(engine):\n return IMPL.db_version(engine)", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self):\r\n return self.version_guid", "def version(self):\n if not hasattr(self, '_version'):\n self._version = self._get_package_version()\n return self._version", "def schema(self):\n return self.prov[PROV_SCHEMA]", "def FormatVersion(self):\n return self._get_attr('FormatVersion')", "def version(self):", "def Version(self):\n if self.force_auto_sync:\n self.get('Version')\n return self._Version", "def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def version(self):\n pass", "def version(self):\n pass", "def version(self):\n pass", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def version(self):\r\n return self.definition_id", "def _get_supported_grype_db_version() -> str:\n grype_wrapper = GrypeWrapperSingleton.get_instance()\n try:\n version_response = grype_wrapper.get_grype_version()\n except CommandException as exc:\n raise GrypeVersionCommandError() from exc\n try:\n return str(version_response[\"supportedDbSchema\"])\n except KeyError as exc:\n raise InvalidGrypeVersionResponse(json.dumps(version_response)) from exc", "def get_product_version(self):\n return self.get_attr('product_version')", "def version(self):\n\n return self.manifest[\"version\"]", "def get_version(self):\n\n r = 
self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "def object_version(self) -> typing.Optional[str]:\n return self._values.get('object_version')", "def version():\n return __VERSION__", "def get_model_format_version(self):\n return None if self.model is None else self.model.get_format_version()", "def get_version(self):\n data = self._get('app_version')\n return data['version']", "def pretty_version(self) -> str:\n try:\n return self._names_from_attrs('pretty_version')\n except AttributeError:\n warnings.warn('pretty __version not found in metadata, fallback to globals.py')\n if self.__version in globals._dataset_version_pretty_names.keys():\n return globals._dataset_version_pretty_names[self.__version]\n else:\n warnings.warn('pretty __version also not found in globals, use __version')\n return self.__version", "def get_version():\n return magpy.get_version()", "def getVersion(self):\n return _libsbml.SBase_getVersion(self)", "def getPackageVersion(self):\n return _libsbml.CompBase_getPackageVersion(self)", "def getObjectVersion(self):\n return _libsbml.SBase_getObjectVersion(self)", "def get_version(self):\n return self.__make_api_call('get/version')", "def version(self) -> Optional[pulumi.Input['FhirStoreVersion']]:\n return pulumi.get(self, \"version\")", "def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")", "def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version" ]
[ "0.84921485", "0.82677555", "0.8089187", "0.7736187", "0.74551374", "0.7435045", "0.73047084", "0.6960835", "0.68983686", "0.68766624", "0.68467546", "0.68244046", "0.6751349", "0.67256296", "0.6715355", "0.6658414", "0.6645252", "0.6636151", "0.6614081", "0.66119516", "0.6587621", "0.6576962", "0.65766615", "0.6544186", "0.6543358", "0.65220594", "0.6519874", "0.6515619", "0.6515619", "0.65141004", "0.65124273", "0.65124273", "0.6503655", "0.6501995", "0.6497972", "0.64950573", "0.64895916", "0.64895916", "0.6479773", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.64743227", "0.6465402", "0.6453743", "0.6448462", "0.64444953", "0.64424604", "0.6435546", "0.6405763", "0.6402527", "0.63972604", "0.6397056", "0.6397056", "0.6397056", "0.6397056", "0.6397056", "0.63970387", "0.63935465", "0.6386636", "0.6386297", "0.6382127", "0.6376917", "0.63666004", "0.6361542", "0.6358767", "0.6358767", "0.6358767", "0.6347323", "0.6332333", "0.6332333", "0.6332333", "0.6332333", "0.6326787", "0.6326496", "0.6322849", "0.63148224", "0.63121474", "0.6306144", "0.6297637", "0.6297341", "0.62820435", "0.62793493", "0.62766224", "0.62684375", "0.62601113", "0.62534493", "0.6252142", "0.6250679", "0.6246292", "0.6245942", "0.624385" ]
0.89709204
0
Return a list of schema namespaces registered in DDE
def registered_dde_schemas(self): return [s["_id"] for s in schemas.get_all(size=100)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def registered_dde_schemas(self):\n url = DDE_SCHEMA_BASE_URL + \"?field=_id&size=20\"\n if self.verbose:\n print(f'Loading registered DDE schema list from \"{url}\"')\n data = load_json_or_yaml(url)\n return [s[\"namespace\"] for s in data[\"hits\"]]", "def namespaces(self):\n return list(self._namespace_schemas.keys())", "def registered_dde_schemas(verbose=False):\n url = DDE_SCHEMA_BASE_URL + \"?field=_id&size=20\"\n if verbose:\n print(f'Loading registered DDE schema list from \"{url}\"')\n data = load_json_or_yaml(url)\n return [s[\"namespace\"] for s in data[\"hits\"]]", "async def list_namespaces(self) -> list:\n return await self.AD.state.list_namespaces()", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def namespaces(self):\n return [self._namespace_prefix]", "def namespaces(self):\n return self.namespaced_fields().namespaces()", "def namespaces(self) -> NamespacesType:\n return self.schema.namespaces", "def namespaces(self):\n namespaces = set()\n for namespace_package in self.namespace_packages:\n dotted_name = []\n for component in namespace_package.split('.'):\n dotted_name.append(component)\n namespaces.add(tuple(dotted_name))\n return sorted(namespaces, key=lambda n: len(n))", "def get_namespaces():\n return list(StaticAsset._load_namespaces().keys())", "def _fetch_all_namespaces():\n response = _fetch_herd_session() \\\n .get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,\n HERD_REST_BASE_PATH, 'namespaces')) \\\n .json()\n\n namespaces = []\n for namespaceKey in response['namespaceKeys']:\n namespaces.append(namespaceKey['namespaceCode'])\n\n _print_info('Retrieved {} namespaces.'.format(len(namespaces)))\n return namespaces", "def get_all_namespaces():\n cmds.namespace(setNamespace=':')\n return cmds.namespaceInfo(listOnlyNamespaces=True, recurse=True)", "def get_namespaces():\r\n\r\n print 'Getting namespaces'\r\n tree = etree.parse('http://lesswrong.wikia.com/wiki/Special:AllPages', parser)\r\n options = tree.xpath('//select[@id=\"namespace\"]/option')\r\n namespaces = [option.get('value') for option in options]\r\n pprint(namespaces)\r\n return namespaces", "def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)", "def namespaces(self):\n if not self._namespaces:\n self.update_namespaces_info()\n\n return self._namespaces", "def getNamespaces(self):\n return _libsbml.SBMLDocument_getNamespaces(self)", "def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)", "def GetProvidedNamespaces(self):\n return set(self._provided_namespaces)", "def test_get_namespaces_names(self):\n pass", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def getNamespaces(self):\n return _libsbml.XMLToken_getNamespaces(self)", "def get_namespaces(self):\n if self.namespaces is None:\n namespaces = unpack(self.api.get_namespaces())\n self.namespaces = {\n namespace['name']: DevopsSecurityNamespace(namespace)\n for namespace in namespaces\n }\n return self.namespaces", "def ns_list(self):\n return sorted(self.get_ns_name(ns) for ns in self.profile.authoritative_servers)", "def get_edge_namespaces():\n hint = request.form['namespaces']\n\n result = {'status': FAIL, 'message': '', 'data': {}}\n try:\n result['status'] = SUCCESS\n result['data']['autocomplete_field'] = []\n result['data']['select_field'] = []\n if hint != '':\n edge_session = edge(edge_create_internal_ns_configuration.edge_url,\n edge_create_internal_ns_configuration.client_id,\n 
edge_create_internal_ns_configuration.clientSecret)\n\n namespaces = edge_session.get_namespaces()\n count = 0\n for namespace in namespaces:\n if namespace['name'].startswith(hint):\n\n result['data']['autocomplete_field'].append({\n 'input': namespace['id'],\n 'value': '%s (%s)' % (namespace['name'], namespace['id'])\n })\n result['data']['select_field'].append({\n 'id': namespace['id'],\n 'txt': namespace['name']\n })\n if count == 10:\n break\n count += 1\n except Exception as e:\n result['status'] = FAIL\n result['message'] = 'Error while searching for Namespaces: %s and hint: %s!' % (util.safe_str(e), hint)\n return result", "def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)", "def namespaces(self):\n return ()", "def GetRequiredNamespaces(self):\n return set(self._required_namespaces)", "def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"namespaces\")", "def test_list_net_namespace(self):\n pass", "def SBMLNamespaces_getSupportedNamespaces():\n return _libsbml.SBMLNamespaces_getSupportedNamespaces()", "def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]", "def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)", "def schemas(self):\n return self.get_schemas()", "def generate_namespaces(self, graph, graph_mode):\n namespaces = []\n if graph_mode == GATEWAY_GATEWAY_GRAPH:\n nodes = graph.gateway_nodes\n namespaces = list(set([roslib.names.namespace(n) for n in nodes]))\n\n elif graph_mode == GATEWAY_PULLED_GRAPH or \\\n graph_mode == GATEWAY_FLIPPED_GRAPH:\n gateway_nodes = graph.gateway_nodes\n connection_nodes = graph.flipped_nodes\n if gateway_nodes or connection_nodes:\n namespaces = [roslib.names.namespace(n) for n in gateway_nodes]\n # an annoyance with the rosgraph library is that it\n # prepends a space to topic names as they have to have\n # different graph node namees from nodes. 
we have to strip here\n namespaces.extend([roslib.names.namespace(n[1:]) for n in connection_nodes])\n\n return list(set(namespaces))", "def namespace_packages(self):\n dotted_names = []\n namespace_packages_file = self.find_egg_info_file('namespace_packages.txt')\n if namespace_packages_file:\n with open(namespace_packages_file) as handle:\n for line in handle:\n line = line.strip()\n if line:\n dotted_names.append(line)\n return dotted_names", "def list(self, dict_output=False, field_selector=\"\"):\n namespaces_list = self.client_core.list_namespace().items\n logger.info(\"Got namespaces\")\n\n if field_selector:\n namespaces_list = field_filter(obj_list=namespaces_list,\n field_selector=field_selector)\n # convert the list to list of dicts if required\n if dict_output:\n namespaces_list = [convert_obj_to_dict(namespace) for namespace in\n namespaces_list]\n else:\n for namespace in namespaces_list:\n namespace.metadata.resource_version = ''\n return namespaces_list", "def getSupportedNamespaces():\n return _libsbml.SBMLNamespaces_getSupportedNamespaces()", "def exemptable_namespaces(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"exemptable_namespaces\")", "def schemas(self):\n if not self._schemas:\n self._schemas = get_schema(self.attributes.workspace.namespace, self.attributes.workspace.name)\n return self._schemas", "def get_schemas(self):\n query = mssqlqueries.get_schemas()\n logger.info(u'Schemas query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def getSBMLNamespaces(self):\n return _libsbml.SBasePlugin_getSBMLNamespaces(self)", "def all_in_namespace(cls, ns):\n return filter_by_prefix(cls.all(), ns + ':')", "def get_all_namespaces(\n soa_dir: str = DEFAULT_SOA_DIR,\n) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:\n rootdir = os.path.abspath(soa_dir)\n namespace_list: List[Tuple[str, ServiceNamespaceConfig]] = []\n for srv_dir in os.listdir(rootdir):\n namespace_list.extend(get_all_namespaces_for_service(srv_dir, soa_dir))\n return namespace_list", "def getSBMLNamespaces(self):\n return _libsbml.ASTBasePlugin_getSBMLNamespaces(self)", "def get_services_in_namespace(self, namespace):\n ret = self.v1_service_list.get(namespace=namespace)\n return [each.metadata.name for each in ret.items]", "def get_all_typespaces(schema_obj):\n\n typespaces = []\n for vendor in schema_obj.vendor_list:\n for typespace in vendor.typespace_list:\n typespaces.append(typespace)\n return typespaces", "def get_schema_defs():\n return SCHEMA_DEFS", "def namespaces(self, psuedo=True):\n if self._namespaces == None:\n result = self.call({'action': 'query',\n 'meta': 'siteinfo',\n 'siprop': 'namespaces'})\n self._namespaces = {}\n self._psuedo_namespaces = {}\n for nsid in result['query']['namespaces']:\n if int(nsid) >= 0:\n self._namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n else:\n self._psuedo_namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n if psuedo:\n retval = {}\n retval.update(self._namespaces)\n retval.update(self._psuedo_namespaces)\n return retval\n else:\n return self._namespaces", "def findModuleSchemas(self):\n if self.codebase:\n module = self.codebase.instantiate(self.service_module_name)\n else:\n module = importlib.import_module(self.service_module_name)\n\n res = []\n\n for o in dir(module):\n if isinstance(getattr(module, o), Schema):\n res.append(getattr(module, o))\n\n return res", "def namespace(self):\n return VarLookupDict(self._namespaces)", "def 
getSBMLExtensionNamespaces(self, *args):\n return _libsbml.LayoutExtension_getSBMLExtensionNamespaces(self, *args)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.CompExtension_getSBMLExtensionNamespaces(self, *args)", "def _get_cloud_function_namespaces():\n logger.info(\n f\"Obtaining Cloud Function namespaces in {self.namespace_region}\"\n )\n\n namespaces = []\n\n collecting_namespaces = True\n max_limit = 200\n offset = 0\n\n # request for namespaces is limited to 200 at a time, thus the request is fulfilled in increments of 200s.\n while collecting_namespaces:\n namespace_metadata = _get_cloud_function_namespaces_metadata(offset)\n if namespace_metadata[\"total_count\"] == max_limit:\n offset += max_limit\n else:\n collecting_namespaces = False\n\n for name_space in namespace_metadata[\"namespaces\"]:\n if \"name\" in name_space: # API based namespace\n namespaces.append(\n {\n \"name\": name_space[\"name\"],\n \"type\": \"API_based\",\n \"id\": name_space[\"id\"],\n \"region\": name_space[\"location\"],\n }\n )\n\n else: # cloud foundry based namespace\n namespaces.append(\n {\n \"name\": name_space[\"id\"],\n \"type\": \"CF_based\",\n \"region\": name_space[\"location\"],\n }\n )\n\n return namespaces", "def test_get_namespaces_from_accounts(self):\n pass", "def getSBMLNamespaces(self):\n return _libsbml.SBase_getSBMLNamespaces(self)", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def list_services(self, **kwargs: Optional[Any]) -> list:\n\n self.logger.debug(\"list_services: %s\", kwargs)\n\n namespace = kwargs.get(\"namespace\", \"global\")\n\n return self.AD.services.list_services(namespace) # retrieve services", "def _walk_schemas(self, schema_dir):\n seen = []\n schemalocs = collections.defaultdict(list)\n\n for top, _, files in os.walk(schema_dir):\n for fn in files:\n if not fn.endswith('.xsd'):\n continue\n\n fp = os.path.abspath(os.path.join(top, fn))\n target_ns = utils.get_target_ns(fp)\n\n if (target_ns, fn) in seen:\n continue\n\n schemalocs[target_ns].append(fp)\n seen.append((target_ns, fn))\n\n for ns, loc in iteritems(self.OVERRIDE_SCHEMALOC):\n schemalocs[ns] = [loc]\n\n return schemalocs", "def _load_namespaces(self):\n nsdocs = self._docset.get_namespaces()\n for nsdoc in nsdocs:\n nsobj = Namespace(nsdoc)\n self._docmap[nsdoc] = nsobj\n self._namespaces.add(nsobj)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.SBMLExtension_getSBMLExtensionNamespaces(self, *args)", "def test_list_policy_for_all_namespaces(self):\n pass", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.QualExtension_getSBMLExtensionNamespaces(self, *args)", "def list_namespaced_net_namespace(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = 
params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespaceList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def getSBMLNamespaces(self):\n return _libsbml.XMLInputStream_getSBMLNamespaces(self)", "def referencedNamespaces (self):\n return frozenset(self.__referencedNamespaces)", "def items(self):\n return self.namespace_to_alias.items()", "def iterNamespaceURIs(self):\n return iter(self.namespace_to_alias)", "def namespaced_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NamespacedNameArgs']]]]:\n return pulumi.get(self, \"namespaced_names\")", "def _get_requested_databases(self):\r\n requested_databases = []\r\n if ((self._requested_namespaces is not None) and\r\n (self._requested_namespaces != [])):\r\n for requested_namespace in self._requested_namespaces:\r\n if requested_namespace[0] is '*':\r\n return []\r\n elif requested_namespace[0] not in IGNORE_DBS:\r\n requested_databases.append(requested_namespace[0])\r\n return requested_databases", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.GroupsExtension_getSBMLExtensionNamespaces(self, *args)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.FbcExtension_getSBMLExtensionNamespaces(self, *args)", "def get_ns_list(logger,body,v1=None):\n if v1 is None:\n v1 = client.CoreV1Api()\n logger.debug('new client - fn get_ns_list')\n \n try:\n matchNamespace = body.get('matchNamespace')\n except KeyError:\n matchNamespace = '*'\n logger.debug(\"matching all namespaces.\")\n logger.debug(f'Matching namespaces: {matchNamespace}')\n \n try:\n avoidNamespaces = body.get('avoidNamespaces')\n except KeyError:\n avoidNamespaces = ''\n logger.debug(\"not avoiding namespaces\")\n\n nss = v1.list_namespace().items\n matchedns = []\n avoidedns = []\n\n for matchns in matchNamespace:\n for ns in nss:\n if re.match(matchns, ns.metadata.name):\n matchedns.append(ns.metadata.name)\n logger.debug(f'Matched namespaces: {ns.metadata.name} matchpathern: {matchns}')\n if avoidNamespaces:\n for avoidns in avoidNamespaces:\n for ns in nss:\n if re.match(avoidns, ns.metadata.name):\n avoidedns.append(ns.metadata.name)\n logger.debug(f'Skipping namespaces: {ns.metadata.name} avoidpatrn: {avoidns}') \n # purge\n for ns in matchedns.copy():\n if ns in avoidedns:\n matchedns.remove(ns)\n\n return matchedns", "def list_schemas(jwt_payload: dict):\n DJConnector.set_datajoint_config(jwt_payload)\n\n # Attempt to connect return true if successful, false is failed\n return [row[0] for row in dj.conn().query(\"\"\"\n SELECT SCHEMA_NAME FROM information_schema.schemata\n WHERE SCHEMA_NAME != 
\"information_schema\"\n ORDER BY SCHEMA_NAME\n \"\"\")]", "def namespace_schema(self, namespace):\n try:\n return self._namespace_schemas[namespace]\n except KeyError:\n raise Error(\"undefined namespace: \\\"%s\\\"; defined namespaces: %s\" % (namespace, util.quoted_list(self._namespace_schemas.keys())))", "def test_get_namespaces_from_account(self):\n pass", "def list_namespaced_namespace(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_namespaced_namespace_with_http_info(**kwargs)\n else:\n (data) = self.list_namespaced_namespace_with_http_info(**kwargs)\n return data", "def _filter_non_existing_namespaces(namespaces, k8s_cli):\n return_code, out = run_shell_command(\n \"{} get ns -o=custom-columns=\\\"DATA:metadata.name\\\" --no-headers=true\".format(k8s_cli))\n if return_code:\n return []\n res = []\n existing_namespaces = set(out.split())\n for namespace in namespaces:\n if namespace in existing_namespaces:\n res.append(namespace)\n else:\n logger.warning(\"Namespace %s doesn't exist - Skipping\", namespace)\n return res", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.MultiExtension_getSBMLExtensionNamespaces(self, *args)", "def get_all_target_namespaces():\n setup_roots = get_all_setups_roots()\n techanim_ns = [x.split(\":\")[0] for x in setup_roots]\n namespaces = get_all_namespaces()\n filtered_ns = []\n for ns in namespaces:\n if ns in [\"UI\", \"ui\", \"shared\", \"Shared\"] + techanim_ns:\n continue\n filtered_ns.append(ns)\n return filtered_ns", "def full_schema_list(self, schema: str) -> List[str]:\n # Generate the information_schema identifier for that database\n # in order to be able to filter it out\n name_parts = schema.split(\".\")\n\n info_schema = f\"{name_parts[0]}.information_schema\"\n\n fetched_schemas = []\n\n # All Schemas\n if name_parts[1] == \"*\":\n db_schemas = self.show_schemas(name_parts[0])\n for db_schema in db_schemas:\n if db_schema != info_schema:\n fetched_schemas.append(db_schema)\n\n # Prefix schema match\n elif \"*\" in name_parts[1]:\n db_schemas = self.show_schemas(name_parts[0])\n for db_schema in db_schemas:\n schema_name = db_schema.split(\".\", 1)[1].lower()\n if schema_name.startswith(name_parts[1].split(\"*\", 1)[0]):\n fetched_schemas.append(db_schema)\n\n # TODO Handle more complicated matches\n\n else:\n # If no * in name, then return provided schema name\n fetched_schemas = [schema]\n\n return fetched_schemas", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "def schemas(self):\n return model.Schemas(self)", "def getTables(self):\n\treturn self.dbNames", "def getTargetNamespaces(self):\n return _libsbml.SBMLConverter_getTargetNamespaces(self)", "def test_list_build_for_all_namespaces(self):\n pass", "def test_list_deployment_config_for_all_namespaces(self):\n pass", "def namespaces(\n self, index: Union[int, str] = \"len\"\n ) -> Union[List[str], int]:\n if index == \"len\":\n return len(self._namespaces)\n try:\n return self._namespaces[index] # type: ignore\n except IndexError:\n return []", "def test_list_policy_binding_for_all_namespaces(self):\n pass", "def getSBMLNamespaces(self):\n return _libsbml.XMLOutputStream_getSBMLNamespaces(self)", "def inScopeNamespaces (self):\n return self.__inScopeNamespaces", "def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def importedNamespaces (self):\n 
return frozenset(self.__importedNamespaces)", "def xmlrpc_namespace():", "def test_list_template_for_all_namespaces(self):\n pass", "def get_packages_with_prefixes():\n return get_resources('packages')", "def all_namespaces(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"all_namespaces\")", "def all_namespaces(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"all_namespaces\")", "def watch_namespaced_namespace_list(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_namespace_list_with_http_info(**kwargs)\n else:\n (data) = self.watch_namespaced_namespace_list_with_http_info(**kwargs)\n return data", "def included_namespaces(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"included_namespaces\")" ]
[ "0.82582337", "0.8003308", "0.7945194", "0.77889043", "0.74570817", "0.7238755", "0.7172259", "0.71680105", "0.7109484", "0.70704514", "0.70290285", "0.7007423", "0.68698627", "0.6844944", "0.6776967", "0.6739239", "0.6705429", "0.6666555", "0.65324306", "0.6531712", "0.6519842", "0.6518313", "0.6490346", "0.6466771", "0.6434772", "0.6407797", "0.6388113", "0.6360038", "0.6335757", "0.6326667", "0.63136715", "0.62907416", "0.62811786", "0.6259043", "0.62514067", "0.62320155", "0.62232125", "0.6172102", "0.61474466", "0.61181706", "0.6105629", "0.6097458", "0.6080958", "0.60755724", "0.60700965", "0.60442", "0.6040044", "0.6028223", "0.60143006", "0.5989021", "0.598621", "0.5982762", "0.5968615", "0.59596926", "0.595819", "0.59525335", "0.59358436", "0.5933187", "0.59076434", "0.59038776", "0.5891502", "0.58894205", "0.5860097", "0.5850401", "0.5835337", "0.5828073", "0.58273125", "0.5815855", "0.5806386", "0.580245", "0.5792757", "0.57856226", "0.5767895", "0.5761961", "0.57573116", "0.5753851", "0.57473683", "0.5732874", "0.57258934", "0.57246745", "0.5721927", "0.57166445", "0.5710927", "0.57093257", "0.5704529", "0.5690905", "0.5675393", "0.5668574", "0.56661046", "0.5662233", "0.56437135", "0.56352574", "0.56257015", "0.562203", "0.56179595", "0.5602367", "0.5570154", "0.5570154", "0.55689293", "0.5563344" ]
0.7246093
5
Load a registered schema
def load_dde_schemas(self, schema): if self.verbose: print(f'Loading registered DDE schema "{schema}"') schema_source = schemas.get(schema) schema_source.pop("_id") return schema_source
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_validator_schema():\n logger.info('Loading validator schemas')\n SchemaLoader.load_all_from_path(validator_config_path)", "def load_schema(schema_path):\n with open(schema_path) as schema_file:\n return Utils.parse(schema_file.read())", "def schema_load(filename):\n print(uc.schema_load(filename))", "def load_schema(name):\r\n\r\n data = pkgutil.get_data(__package__, \"schemas/{0}.json\".format(name))\r\n return json.loads(data.decode(\"utf-8\"))", "def load_dde_schemas(self, schema):\n url = DDE_SCHEMA_BASE_URL + schema\n if self.verbose:\n print(f'Loading registered DDE schema from \"{url}\"')\n return load_json_or_yaml(url)[\"source\"]", "def load_schema(self, schema_file):\n with open(schema_file) as fp:\n for line in io.lines_in(fp):\n parts = line.strip().split('\\t')\n if len(parts) != 3:\n raise ValueError('invalid type declaration %r' % line.strip())\n self.declare_relation(parts[0], parts[1], parts[2])", "def load_schema(self, schema):\n if not self.default_schema_loaded:\n self.load_default_schema()\n # load JSON-LD file of user defined schema\n self.schema_extension_only = preprocess_schema(load_json_or_yaml(schema))\n if \"@context\" in self.schema_extension_only:\n self.context.update(self.schema_extension_only[\"@context\"])\n # convert user defined schema into a networkx DiGraph\n self.schema_extension_nx = load_schema_into_networkx(self.schema_extension_only)\n # update undefined classes/properties\n undefined_nodes = [node for node, attrdict in self.schema_extension_nx.node.items() if not attrdict]\n attr_dict = {}\n \n for _node in undefined_nodes:\n if _node in self.schemaorg_nx.nodes():\n attr_dict[_node] = self.schemaorg_nx.nodes[_node]\n nx.set_node_attributes(self.schema_extension_nx, attr_dict)\n # merge networkx graph of user-defined schema with networkx graph of schema defined by Schema.org\n #self.schema_nx = merge_schema_networkx(self.schemaorg_nx, self.schema_extension_nx)\n self.schema_nx = self.schema_extension_nx\t\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_full_schema()\n # merge together the given schema and the schema defined by schemaorg\n #self.schema = merge_schema(self.schema_extension_only, self.schemaorg_schema)\n self.schema = self.schemaorg_schema\n # split the schema networkx into individual ones\n isolates = list(nx.isolates(self.schema_nx))\n \n for node, attrdict in self.schema_extension_nx.node.items():\n if not 'type' in attrdict:\n self.schema_extension_nx.nodes[node][\"type\"] = \"Class\" \n for node, attrdict in self.schema_nx.node.items():\n if not 'type' in attrdict:\n self.schema_nx.nodes[node][\"type\"] = \"Class\" \n \n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)", "def 
load_dde_schemas(schema, verbose=False):\n url = DDE_SCHEMA_BASE_URL + schema\n if verbose:\n print(f'Loading registered DDE schema from \"{url}\"')\n return load_json_or_yaml(url)[\"source\"]", "def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def load_schema_dataset(self, dataset_raw):\r\n\r\n self._dataset_raw = dataset_raw\r\n return self\r\n # self._parse_schemas_raw()\r\n # print(schemas)\r", "def test_can_load_xsd_schema(self):\n schema = xmlschema.XMLSchema('valitest.xsd')\n self.assertIsInstance(schema, xmlschema.XMLSchema)", "def _load_schemas(self) -> None:\n schema_paths = self._root.rglob(\"*.json\")\n for schema_path in schema_paths:\n schema = json.loads(schema_path.read_text())\n\n if self._suffix:\n schema[\"name\"] = f'{schema[\"name\"]}{self._suffix}'\n\n fqn = get_avro_fqn(schema)\n self.schemas[fqn] = schema", "def load_default_schema(self):\n self.schema = preprocess_schema(load_schemaorg())\n self.schemaorg_schema = self.schema\n if \"@context\" in self.schema:\n self.context.update(self.schema[\"@context\"])\n self.schema_extension_only = self.schema\n self.schemaorg_nx = load_schema_into_networkx(self.schema)\n self.schema_extension_nx = self.schemaorg_nx\n self.schema_nx = self.schemaorg_nx\n isolates = list(nx.isolates(self.schema_nx))\n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)\n self.default_schema_loaded = True", "def load_validation_schema(self) -> t.Dict[str, t.Any]:\n if self._schema is None:\n try:\n self._schema = json.loads(self.schema())\n except KeyError:\n device_type_striped 
= self._device_type.lower().rstrip(string.digits)\n with open(_CT_FILES[device_type_striped], encoding=\"utf-8\") as file_:\n self._schema = json.load(file_)\n return self._schema # type: ignore", "async def reload_database(self, schema='conf/schema.sql'):\n with open(schema) as schema:\n await self.dao.build((schema.read()))", "def load_schema(path, collection, readonly):\n return JSONStorage(path, collection, readonly)", "def load_schema_for_modelling():\n filename = \"modelling_schema.csv\"\n folder = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(folder, filename)\n return pd.read_csv(path).set_index('table_name')", "def _load_tdx_schema(self):\n tdx_schema: TDXSchema = TDXSchema(dict())\n if _sqliteinfotable.checkInfoTable(self.sqlEngine):\n info_keys = _sqliteinfotable.getInfoKeys(\n self.sqlEngine, [SCHEMA_KEY], self.session_maker)\n if info_keys: # lists are False is empty\n info_keys.setdefault(SCHEMA_KEY, dict())\n # dataset schema definition\n tdx_schema = info_keys[SCHEMA_KEY]\n # dataset data schema\n tdx_schema.setdefault(\"dataSchema\", dict())\n self.tdx_schema = tdx_schema\n self.tdx_data_schema = t.cast(\n schemaconverter.TDXDataSchema, tdx_schema[\"dataSchema\"])", "def load_schema(filename):\n with open(filename) as f:\n schema = json.load(f)\n\n return schema", "def get_schema(path):\n with open(path, 'r') as f:\n return json.load(f)", "def load(cls, data):\n if cls.Schema is None: # pragma: no cover\n msg = (\"Schema of this Model are not specified! For example: \"\n \"class User(BaseModel): ...; class UserSchema(Schema): ...; \"\n \"User.Schema = UserSchema\")\n raise NotImplementedError(msg)\n\n res = cls.Schema().load(data)\n if len(res.errors) == 0:\n return res.data\n else:\n raise Exception(\"Errors: {}\".format(res.errors))", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def test_can_import_xmlschema(self):\n self.assertEqual(xmlschema.__name__, 'xmlschema')", "def preload_local_schemas(self):\n schemas = [self.SCHEMA_PACKAGE_DESCRIPTOR,\n self.SCHEMA_SERVICE_DESCRIPTOR,\n self.SCHEMA_FUNCTION_DESCRIPTOR]\n\n for schema in schemas:\n schema_file = self._schemas[schema]['local']\n if not os.path.isfile(schema_file):\n continue\n try:\n self._schemas_library[schema] = load_local_schema(schema_file)\n except FileNotFoundError:\n continue", "def _get_schema(name):\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)", "def schema() -> None:\n pass", "def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)", "def load_schemas():\n schemas = {}\n for filename in os.listdir(get_abs_path('schemas')):\n path = get_abs_path('schemas') + '/' + filename\n file_raw = filename.replace('.json', '')\n with open(path) as file:\n schemas[file_raw] = Schema.from_dict(json.load(file))\n return schemas", "def schema(self):\n pass", "def testSchemaLoadingAsString(self):\n api = self.ApiFromDiscoveryDoc('latitude.v1.json')\n self.assertEquals(4, len(api._schemas))", "def get_schema(self) -> dict:", "def schema(self):", "def connect(self):\n 
should_load_schema = False\n if not os.path.exists(self.filename):\n should_load_schema = True\n\n self._connect()\n\n if should_load_schema:\n self._load_schema()\n else:\n self._load_database()", "def test_validate_schema(schema_path):\n # Make sure that each schema itself is valid.\n schema_tree = schema.load_schema(schema_path, resolve_references=True)\n schema.check_schema(schema_tree)", "def load(self, base_schema):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n self.schema_org_version = get_schemaorg_version()\n _base_schema.append(\n load_schemaorg(version=self.schema_org_version, verbose=self.verbose)\n )\n continue\n elif self.is_a_dde_schema(_sc):\n _base_schema.append(self.load_dde_schemas(_sc))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):\r\n\r\n # Only one of file_name or schema_obj must be set\r\n assert bool(file_name) != bool(schema_obj)\r\n\r\n if path_prefix:\r\n spec_path = os.path.join(spec_path, \"APIs/schemas/\")\r\n base_path = os.path.abspath(spec_path)\r\n if not base_path.endswith(\"/\"):\r\n base_path = base_path + \"/\"\r\n if os.name == \"nt\":\r\n base_uri_path = \"file:///\" + base_path.replace('\\\\', '/')\r\n else:\r\n base_uri_path = \"file://\" + base_path\r\n\r\n loader = jsonref.JsonLoader(cache_results=False)\r\n\r\n if file_name:\r\n json_file = str(Path(base_path) / file_name)\r\n with open(json_file, \"r\") as f:\r\n schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n elif schema_obj:\r\n # Work around an exception when there's nothing to resolve using an object\r\n if \"$ref\" in schema_obj:\r\n schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n else:\r\n schema = schema_obj\r\n\r\n return schema", "def load_schema(self, template, reload=False):\n # Check if template is already loaded and present in _schemas_library\n if template in self._schemas_library and not reload:\n log.debug(\"Loading previously stored schema for {}\"\n .format(template))\n\n return self._schemas_library[template]\n\n # Load Online Schema\n schema_addr = self._schemas[template]['remote']\n if validators.url(schema_addr):\n try:\n log.debug(\"Loading schema '{}' from remote location '{}'\"\n .format(template, schema_addr))\n\n # Load schema from remote source\n self._schemas_library[template] = \\\n load_remote_schema(schema_addr)\n\n # Update the corresponding local schema file\n write_local_schema(self._schemas_local_master,\n self._schemas[template]['local'],\n self._schemas_library[template])\n\n return self._schemas_library[template]\n\n except RequestException as e:\n log.warning(\"Could not load schema '{}' from remote \"\n \"location '{}', error: {}\"\n .format(template, schema_addr, e))\n else:\n log.warning(\"Invalid schema URL '{}'\".format(schema_addr))\n\n # Load Offline Schema\n schema_addr = self._schemas[template]['local']\n if os.path.isfile(schema_addr):\n try:\n log.debug(\"Loading schema '{}' from local file '{}'\"\n .format(template, schema_addr))\n\n self._schemas_library[template] = \\\n load_local_schema(schema_addr)\n\n return self._schemas_library[template]\n\n 
except FileNotFoundError:\n log.warning(\"Could not load schema '{}' from local file '{}'\"\n .format(template, schema_addr))\n\n else:\n log.warning(\"Schema file '{}' not found.\".format(schema_addr))\n\n log.error(\"Failed to load schema '{}'\".format(template))", "def load_local_schema(filename):\n # Confirm that schema file exists\n if not os.path.isfile(filename):\n log.warning(\"Schema file '{}' does not exist.\".format(filename))\n raise FileNotFoundError\n\n # Read schema file and return the schema as a dictionary\n schema_f = open(filename, 'r')\n schema = yaml.load(schema_f)\n assert isinstance(schema, dict), \"Failed to load schema file '{}'. \" \\\n \"Not a dictionary.\".format(filename)\n\n return schema", "def __init__(self, schema ):\n self.schema = schema", "def initialize_schema(self, dry_run=False):\n if not dry_run:\n self.flush()", "def _load(self, list_of_schema_urls):\n for uri in list_of_schema_urls:\n with urllib.request.urlopen(uri) as url:\n data = {}\n try:\n data = json.loads(url.read().decode())\n except:\n print(\"Failed to read schema from \" + uri)\n self._parser._load_schema(data)\n return self", "def create_schema(self, schema: str):\n return", "def schema(self, schema):\n self._schema = schema", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def readjamschema(schema):\n raise NotImplementedError(msg)", "def load_base_schema(base_schema=None, verbose=False):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n # if \"schema.org\" in _base or \"schema\" in _base:\n # _base_schema.append(\n # load_schemaorg(verbose=verbose)\n # )\n # if \"bioschemas\" in _base:\n # _base_schema.append(\n # load_bioschemas(verbose=verbose)\n # )\n\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n _base_schema.append(load_schemaorg(verbose=verbose))\n continue\n elif _sc in registered_dde_schemas():\n _base_schema.append(load_dde_schemas(_sc, verbose=verbose))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema", "def get_schema() -> dict:\n raise NotImplementedError()", "def parse_schema_from_file(schema_path):\n with open(schema_path) as f:\n return parse_schema_from_string(f.read())", "def _load_bigquery_schemas(self):\n logger.info(\"Reading BigQuery schema files...\")\n for table_name in self.tables + self.type_tables:\n logger.info(f\"Reading schema file for table '{table_name}'...\")\n schema_json = resource_stream('sotorrent_pipeline',\n f'bigquery_schemas/{table_name}.json').read().decode()\n self.bigquery_schemas[table_name] = json.loads(schema_json)\n self.bigquery_schemas_with_fields[table_name] = json.loads('{\"fields\":' + schema_json + '}')\n logger.info(f\"Read {len(self.bigquery_schemas)} schema file(s).\")", "def init_db():\n db = get_db()\n with current_app.open_resource('schema.sql') as f:\n db.executescript(f.read().decode('utf8'))", "async def upgradeSchema(self) -> None:", "def _init_reader_schema(self, field_names=None):\n if field_names:\n return from_column_list(field_names)\n\n assert os.path.exists(self.db_path), \\\n 'db_path [{db_path}] does not exist'.format(db_path=self.db_path)\n with core.NameScope(self.name):\n # blob_prefix is for avoiding name conflict in workspace\n blob_prefix = scope.CurrentNameScope()\n workspace.RunOperatorOnce(\n core.CreateOperator(\n 'Load',\n [],\n [],\n absolute_path=True,\n db=self.db_path,\n db_type=self.db_type,\n load_all=True,\n add_prefix=blob_prefix,\n )\n )\n 
col_names = [\n blob_name[len(blob_prefix):] for blob_name in workspace.Blobs()\n if blob_name.startswith(blob_prefix)\n ]\n schema = from_column_list(col_names)\n return schema", "def init_db():\n db = get_db()\n\n with current_app.open_resource(\"schema.sql\") as f:\n db.executescript(f.read().decode(\"utf8\"))", "def __init__(self, schema=None):\n self.schema = schema or {}", "def set_schema(self, schema):\r\n self.__schema = schema", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def getSchema(cls):\n pass", "def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "def init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as fobj:\n db.cursor().executescript(fobj.read())\n db.commit()", "def check_schema_uri(self):\n import asdf\n\n if self.schema_uri is not None:\n with log.augment_exception(\"Invalid ASDF schema URI:\", self.schema_uri):\n asdf.schema.load_schema(self.schema_uri)", "def schema(self):\n raise NotImplementedError", "def test_with_defined_schema_and_inferred_schema_is_true(self):\n # should default to using the defined schema\n frame = self.context.frame.import_csv(self.dataset,\n infer_schema=True, schema=self.schema)\n self.assertEqual(frame.schema, self.schema)", "def setup_schema(command, conf, vars):", "def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)", "def load_json_schema(filename):\n relative_path = join('../schema', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(\n schema_file.read(), base_uri=base_uri, jsonschema=True)", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def _get_schema(self):\n self._pick()\n return Schema()", "def init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def get_schema(filename: str) -> dict:\n return _load_json_schema(filename)", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return 
jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)", "def get_schema(cls):\n return cls.schema()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. 
Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "def registered_dde_schemas(verbose=False):\n url = DDE_SCHEMA_BASE_URL + \"?field=_id&size=20\"\n if verbose:\n print(f'Loading registered DDE schema list from \"{url}\"')\n data = load_json_or_yaml(url)\n return [s[\"namespace\"] for s in data[\"hits\"]]", "def registered_dde_schemas(self):\n url = DDE_SCHEMA_BASE_URL + \"?field=_id&size=20\"\n if self.verbose:\n print(f'Loading registered DDE schema list from \"{url}\"')\n data = load_json_or_yaml(url)\n return [s[\"namespace\"] for s in data[\"hits\"]]", "def load_customer_schema(request):\n schema = schemas.load(schemas.Customer, request)\n if not schema['klantcode']:\n schema['klantcode'] = util.timebased_customer_code()\n return schema", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def load_schema_kq(self, q_idx, schema_fp, gather_linkings):\n candidate_list = []\n with codecs.open(schema_fp, 'r', 'utf-8') as br:\n sc_lines = br.readlines()\n for ori_idx, sc_line in enumerate(sc_lines):\n schema = Schema()\n # load schemas\n schema.read_schema_from_json(q_idx=q_idx, json_line=sc_line, gather_linkings=gather_linkings,\n ori_idx=ori_idx, full_constr=self.full_constr)\n\n candidate_list.append(schema)\n\n return candidate_list, len(sc_lines)", "def import_schemas_from_file():\n with open('./tblSchemas') as schemas_file:\n schemas = {}\n for line in schemas_file:\n line = line.split()\n if len(line) == 0: continue\n if line[0] == 'tblname':\n tbl_name = line[1]\n schemas[tbl_name] = []\n else:\n schemas[tbl_name].append(line)\n return schemas", "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return 
None", "def load_schema_into_networkx(schema, load_class=True, load_property=True, load_datatype=True):\n # initialize DiGraph for classes, properties and data types\n G = nx.DiGraph()\n edges = []\n classes = {}\n for record in schema[\"@graph\"]:\n if record[\"@id\"] in DATATYPES and load_datatype:\n G.add_node(\n record[\"@id\"],\n description=record[\"rdfs:comment\"],\n type=\"DataType\",\n )\n edges += find_parent_child_relation(record)\n elif record[\"@type\"] == \"rdfs:Class\" and load_class:\n if record[\"@id\"] in classes:\n classes[record[\"@id\"]][\"description\"] = record[\"rdfs:comment\"]\n classes[record[\"@id\"]][\"type\"] = \"Class\"\n else:\n classes[record[\"@id\"]] = {\n \"description\": record[\"rdfs:comment\"],\n \"type\": \"Class\",\n \"properties\": [],\n \"used_by\": [],\n }\n # add class edges\n edges += find_parent_child_relation(record)\n elif record[\"@type\"] == \"rdf:Property\" and load_property:\n _domain, _range = find_domain_range(record)\n _inverse = record.get(\"http://schema.org/inverseOf\")\n if _inverse:\n _inverse = _inverse[\"@id\"]\n G.add_node(\n record[\"@id\"],\n description=record[\"rdfs:comment\"],\n domain=_domain,\n range=_range,\n inverse=_inverse,\n type=\"Property\",\n )\n property_info = {\n \"description\": record[\"rdfs:comment\"],\n \"domain\": _domain,\n \"range\": _range,\n \"inverse\": _inverse,\n \"uri\": record[\"@id\"],\n }\n for _id in _domain:\n if _id not in DATATYPES:\n if _id not in classes:\n classes[_id] = {\n \"properties\": [property_info],\n \"type\": \"Class\",\n \"used_by\": [],\n }\n else:\n classes[_id][\"properties\"].append(property_info)\n for _id in _range:\n if _id not in DATATYPES:\n if _id not in classes:\n classes[_id] = {\n \"used_by\": [property_info],\n \"type\": \"Class\",\n \"properties\": [],\n }\n else:\n classes[_id][\"used_by\"].append(property_info)\n edges += find_parent_child_relation(record, _type=\"Property\")\n G.add_edges_from(edges)\n G.add_nodes_from(list(classes.items()))\n\n return G", "def loadPlotSchema(path):\n\n document = loadAsDom(path)\n removeWhitespaceText(document)\n if len(document.childNodes) != 1 \\\n or document.childNodes[0].nodeName != \"plot-schema\":\n raise ParseError, \"document is not an XML plot schema\"\n\n return _plotSchemaFromDom(document.childNodes[0])", "def schema(self, name):\n return model.Schema(self, name)", "def registration_schema(self, ctx):\n schema = RegistrationSchema()\n schema.context['ctx'] = ctx\n return schema", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def test_schema_exists(self):\n return exclusions.open()" ]
[ "0.71533245", "0.7038362", "0.70110863", "0.6746675", "0.6709357", "0.6656535", "0.6612241", "0.66008633", "0.65909773", "0.65366113", "0.64810675", "0.6431707", "0.6424626", "0.64035326", "0.63424355", "0.63148296", "0.63040596", "0.62571245", "0.6245016", "0.62211007", "0.6206376", "0.61778533", "0.6171148", "0.6123724", "0.6101996", "0.6071951", "0.6054436", "0.6053434", "0.60440046", "0.6019805", "0.59945923", "0.59655696", "0.5964484", "0.5942297", "0.59147173", "0.590759", "0.59049237", "0.5902591", "0.59022856", "0.5886134", "0.5885737", "0.58811975", "0.58689314", "0.5844938", "0.5829263", "0.5794516", "0.57943183", "0.5792365", "0.57895774", "0.5787621", "0.5784688", "0.5768347", "0.5766207", "0.57573867", "0.57508343", "0.57493067", "0.5744583", "0.57378614", "0.5725936", "0.5722992", "0.56937", "0.5691225", "0.5689192", "0.56891805", "0.5677761", "0.56703377", "0.56580675", "0.56573874", "0.5655772", "0.56491274", "0.56489885", "0.56485575", "0.56485575", "0.56485575", "0.56460994", "0.56443095", "0.5642676", "0.56411463", "0.5638903", "0.56347317", "0.56339353", "0.56325275", "0.56202036", "0.56135637", "0.56135637", "0.56135637", "0.56135637", "0.56135637", "0.56135637", "0.56135637", "0.5593425", "0.5578955", "0.5565664", "0.5563962", "0.55499846", "0.55496264", "0.5542157", "0.5539231", "0.5534562", "0.5532619" ]
0.6969959
3
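The negatives in the record above share one pattern: try a remote schema source first, then fall back to a local file, caching what was loaded. A minimal, self-contained sketch of that remote-then-local pattern (the function name, coarse error handling, and UTF-8 assumption are mine, not taken from any snippet above):

import json
import os
import urllib.request

def load_schema_with_fallback(remote_url, local_path):
    # Try the remote source first; urlopen errors subclass OSError.
    try:
        with urllib.request.urlopen(remote_url) as resp:
            return json.loads(resp.read().decode("utf-8"))
    except OSError:
        # Fall back to the local copy when the remote source is unreachable.
        if os.path.isfile(local_path):
            with open(local_path, encoding="utf-8") as f:
                return json.load(f)
        raise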
get only classes defined in this schema
def get_class_defs(self):\n return list(self._get_class_defs().values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_all_classes(self):\n classes = list(self.extended_class_only_graph.nodes())\n classes = [SchemaClass(_cls, self) for _cls in classes]\n return classes", "def get_classes(self):\n return", "def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes", "def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]", "def get_classes(self):\n return self._classes", "def classes(self):\r\n return self._classes", "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def getClasses(self):\n self._process()\n return self._sets", "def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")", "def classes(self):\n return list(self._classes_generator())", "def parent_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n root_node = list(nx.topological_sort(self.se.full_class_only_graph))\n # When a schema is not a tree with only one root node\n # Set \"Thing\" as the root node by default\n if 'http://schema.org/Thing' in root_node:\n root_node = 'http://schema.org/Thing'\n else:\n root_node = root_node[0]\n paths = nx.all_simple_paths(self.se.full_class_only_graph,\n source=root_node,\n target=self.uri)\n paths = [_path[:-1] for _path in paths]\n result = restructure_output(self,\n paths,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def find(self):\n\n response = self.client.get(Classes.PATH_CLASSES)\n return response", "def models(self):\n return self._base.classes", "def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi", "def get_data_classes(self):\n return self._do_request(\"dataclasses\")", "def classes(self):\n return self._.d", "def class_types(self) -> Set[str]:\n # NOTE: This version is simple, but some dependent classes\n # (notably RawSegment) override this with something more\n # custom.\n return self._class_types", "def get_class_definitions(cls):\n\n return cls._namespace", "def classes(self):\n if \"classes\" in self._prop_dict:\n return ClassesCollectionPage(self._prop_dict[\"classes\"])\n else:\n return None", "def get_schema_cls() -> t.Any:\n return None", "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "def get_classes(self, include_ref=True):\n defs = self._get_class_defs()\n ans = {}\n ans.update(defs)\n if include_ref:\n refs = self._get_class_refs()\n ans.update(refs)\n return list(ans.values())", "def get_meta_classes(self):\n return self.meta_classes.values()", "def obj_classes(self) -> ObjClassCollection:\n return self._obj_classes", "def returns_distinct_classes(self):\n assert simple_class() is not simple_class()", "def _classesToCheck(self, cls):\r\n yield cls\r\n yield from inspect.getmro(cls)", "def child_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.full_class_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", 
"def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def all(klass):\n return klass.find()", "def get_fixture_classes(self):\n class_docs = self.axdb_client.get_fixture_classes()\n return [FixtureClass.deserialize_axdbdoc(doc) for doc in class_docs]", "def descendant_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n descendants = nx.descendants(self.se.full_class_only_graph,\n self.uri)\n result = restructure_output(self,\n descendants,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def classes(self):\n return self.browser.classes(self)", "def relevant_classifications(self):\n return self.relevant_classes", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def get_class(self, class_name, output_type=\"PythonClass\"):\n uris = self.cls_converter.get_uri(class_name)\n if type(uris) == list:\n warnings.warn(\"Found more than 1 classes defined within schema using label {}\".format(class_name))\n return [SchemaClass(_item, self, output_type) for _item in uris]\n else:\n return SchemaClass(class_name, self, output_type)", "def _get_my_schemas(type_, package=None):\n if package is None:\n package = session.package\n if package is None:\n raise TypeError(\"no package set in session, must be specified\")\n class TypeSchemas(ElementCollection):\n def __iter__(self):\n return type_.iter_my_schemas(package)\n def __len__(self):\n return type_.count_my_schemas(package)\n def __contains__(self, s):\n return getattr(s, \"ADVENE_TYPE\", None) == LIST \\\n and s.get_meta(CAMSYS_TYPE, None) == \"schema\" \\\n and type_ in s\n return TypeSchemas(package)", "def class_exts(cls):\n return set()", "def list_embedded_metadata_classes():\n return [\n obj\n for name, obj in inspect.getmembers(sys.modules[__name__])\n if inspect.isclass(obj) and issubclass(obj, EmbeddedMetadata) and obj.__module__.startswith(__name__)\n ]", "def findModuleSchemas(self):\n if self.codebase:\n module = self.codebase.instantiate(self.service_module_name)\n else:\n module = importlib.import_module(self.service_module_name)\n\n res = []\n\n for o in dir(module):\n if isinstance(getattr(module, o), Schema):\n res.append(getattr(module, o))\n\n return res", "def classes(class_name):\r\n\td = {}\r\n\tfor k, v in class_name.__dict__.items():\r\n\t\tif not (k.startswith('__') and k.endswith('__')):\r\n\t\t\td[k] = v\r\n\treturn d", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n 
return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getWordClasses(self):\n it = self._call_java('getWordClasses').toIterator()\n result = []\n while (it.hasNext()):\n result.append(it.next().toString())\n return result", "def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls", "def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)", "def direct_descendant_type_set(self) -> Set[str]:\n return set(chain.from_iterable(seg.class_types for seg in self.segments))", "def instance_classes(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_classes\")", "def GetScaffolderClasses(cls) -> Iterator[Type[interface.Scaffolder]]:\n for scaffolder_class in cls._scaffolder_classes.values():\n yield scaffolder_class", "def get_all_classes_defined_in_module(module):\n for _cls in inspect.getmembers(module, inspect.isclass):\n if module.__name__ == _cls[1].__module__:\n yield _cls", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def get_classes(engine: Engine) -> Dict[str, PlayableClass]:\n\n classes = engine.get_classes()\n assert classes is not None\n\n class_objs = {}\n for class_idx_data in classes:\n class_data = PlayableClass(engine, class_idx_data[\"id\"])\n class_objs[class_data.to_serialize[\"slug\"]] = class_data\n return class_objs", "def class_types(self):\n\n if None is self.__class_types:\n self.__class_types = []\n for type_ in self.argument_types:\n decl = None\n type_ = type_traits.remove_reference(type_)\n if type_traits_classes.is_class(type_):\n decl = type_traits_classes.class_traits.get_declaration(\n type_)\n elif type_traits_classes.is_class_declaration(type_):\n tt = type_traits_classes.class_declaration_traits\n decl = tt.get_declaration(type_)\n else:\n pass\n if decl:\n self.__class_types.append(decl)\n return self.__class_types", "def get_parents_of_class(class_uri, endpoint):\n query = \"\"\"\n select distinct ?c where{\n <%s> rdfs:subClassOf ?c.\n }\n \"\"\" % class_uri\n results = run_query(query=query, endpoint=endpoint)\n classes = [r['c']['value'] for r in results]\n return classes", "def getleafsubclasses(cls):\n scls = itersubclasses(cls)\n return [s for s in scls if not s.__subclasses__()]", "def getSchema(cls):\n pass", "def classes(self) -> Iterable[GDScriptClass]:\n for item in self._classes_by_type_id.values():\n yield item", "def get_subclasses(self, class_name):\n return class_name.__subclasses__()", "def GetClassBases(self,cls):\n name = \"\"\n for i in cls:\n if i != \")\":\n name+=i\n\n lst = name.split(\"(\")\n cls_lst = lst[-1].split(\",\")\n if cls_lst:\n return cls_lst\n else:\n return False", "def available_subclasses(cls):\n return (subclass for subclass in cls.all_named_subclasses() if subclass.available_on_system())", "def _used_annotations(cls) -> set:\n return set(field.type for field in dataclasses.fields(cls))", "def dataclasses(self):\n return self._dataclasses", "def get_instance_classes():\n return Base_Instance.instance_classes", "def get_all(cls):\n result = cls.query.all()\n if not result:\n return {\n \"message\": \"The class of objects do not exist\",\n \"help\": \"Ensure the class required 
has objects.\"\n }\n return result", "def all_entity_classes():\n persistent_classes = Entity._decl_class_registry.values()\n # with sqlalchemy 0.8 _decl_class_registry holds object that are not classes\n return [ cls for cls in persistent_classes\n if isclass(cls) and issubclass(cls, Entity) ]", "def gen_extractor_classes():\n from .extractors import _ALL_CLASSES\n\n return _ALL_CLASSES", "def test___get_all_classes():\n config = {\"plugins\": [\"tests.mock_plugin\"]}\n classes = r._get_all_classes(config, r.DataSource)\n assert \"food\" in classes\n classes = r._get_all_classes(config, r.DataSink)\n assert \"food\" in classes", "def get_classes(mod):\n return [\n key\n for key, _ in inspect.getmembers(mod, inspect.isclass)\n if key[0].isupper()\n ]", "def classes(self):\n return str(self._classes)", "def get_all_lr_classes():\n lr_classes = {}\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isclass(obj) and name != 'ABC':\n lr_classes[name] = obj\n return lr_classes", "def subclasses(cls) -> Iterator:\n for subclass in cls.__subclasses__():\n if subclass._type_definition.description: # type: ignore\n yield subclass\n yield from subclass.subclasses()", "def mes_classes(self):\n\t\tfrom pykol.models.base import Classe\n\t\tif self.has_perm('pykol.direction'):\n\t\t\treturn Classe.objects.all()\n\t\ttry:\n\t\t\treturn self.professeur.mes_classes()\n\t\texcept ObjectDoesNotExist:\n\t\t\treturn Classe.objects.none()", "def get_subclasses(self, klass: TypeInfo) -> OrderedSet[TypeInfo]:\n if klass not in self._graph:\n return OrderedSet([klass])\n result: OrderedSet[TypeInfo] = OrderedSet(nx.descendants(self._graph, klass))\n result.add(klass)\n return result", "def get_defined_pair_classes(self):\n self._collect()\n if self.mode == 'dynamic':\n return []\n else:\n return self._preloaded_pair_classes", "def FindChilds(self,cls):\n childs = []\n for i in self.classes:\n if self.InheritsFrom(cls,i):\n childs.append(i)\n if childs:\n return childs\n else:\n return False", "def schemas(self):\n return model.Schemas(self)", "def classes(attrs):\n return attrs.get('class', '').split()", "def DSC(self):\n return len(self.user_defined_classes)", "def _get_filter_classes_from_module(module_name):\n classes = []\n module = utils.import_object(module_name)\n for obj_name in dir(module):\n itm = getattr(module, obj_name)\n if _is_filter_class(itm):\n classes.append(itm)\n return classes", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def contain_one_class(self, documents):\n classes = []\n for document in documents:\n if document.c not in classes:\n if len(classes) == 0:\n classes.append(document.c)\n else:\n return None\n if len(classes) == 1:\n return classes[0]\n else:\n return None" ]
[ "0.78731024", "0.71519256", "0.7128944", "0.7102175", "0.7069883", "0.6910617", "0.67119366", "0.66961735", "0.6668723", "0.6640557", "0.66171163", "0.6541783", "0.6539696", "0.65327054", "0.65109813", "0.64602256", "0.6432275", "0.64070165", "0.63955426", "0.6380493", "0.6377903", "0.6347517", "0.63047945", "0.6294546", "0.6284257", "0.6275353", "0.6270153", "0.6256733", "0.6256733", "0.62550914", "0.6231507", "0.62263745", "0.6223959", "0.6222862", "0.61949056", "0.61949056", "0.61949056", "0.61949056", "0.61949056", "0.61949056", "0.61935306", "0.6191381", "0.610945", "0.61060846", "0.6101337", "0.60911095", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.6074873", "0.60716814", "0.6056181", "0.60448045", "0.6038491", "0.60237163", "0.60213715", "0.6013331", "0.6005995", "0.59914696", "0.59546614", "0.59537554", "0.59372395", "0.5935057", "0.5911256", "0.59010124", "0.5894264", "0.5875553", "0.5868938", "0.5848621", "0.584356", "0.5842018", "0.5838366", "0.5834255", "0.5818338", "0.58034235", "0.5801161", "0.5776939", "0.5772276", "0.57715833", "0.57694113", "0.57592237", "0.57569695", "0.5733633", "0.5724456", "0.5713231", "0.5704323", "0.5701616", "0.56969774", "0.5696664" ]
0.65426725
11
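For orientation, a hedged sketch of how the one-line document above behaves; the surrounding class and the `{uri: class}` dict behind `_get_class_defs()` are assumptions for illustration, not part of the dataset:

class SchemaSketch:
    # Hypothetical stand-in for the schema object behind the document field;
    # _get_class_defs() is assumed to return a {uri: class} mapping.
    def __init__(self, defs):
        self._defs = defs

    def _get_class_defs(self):
        return self._defs

    def get_class_defs(self):
        # Same shape as the document: the dict's values, as a list.
        return list(self._get_class_defs().values())

s = SchemaSketch({"schema:Person": "PersonClass"})
assert s.get_class_defs() == ["PersonClass"]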
get only classes referenced outside this schema
def get_class_refs(self):\n return list(self._get_class_refs().values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_all_classes(self):\n classes = list(self.extended_class_only_graph.nodes())\n classes = [SchemaClass(_cls, self) for _cls in classes]\n return classes", "def parent_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n root_node = list(nx.topological_sort(self.se.full_class_only_graph))\n # When a schema is not a tree with only one root node\n # Set \"Thing\" as the root node by default\n if 'http://schema.org/Thing' in root_node:\n root_node = 'http://schema.org/Thing'\n else:\n root_node = root_node[0]\n paths = nx.all_simple_paths(self.se.full_class_only_graph,\n source=root_node,\n target=self.uri)\n paths = [_path[:-1] for _path in paths]\n result = restructure_output(self,\n paths,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def getRoots():\n ret = []\n for cc in Concept.objects.all():\n if Link.objects.filter(descendant=cc).count() == 0:\n ret.append(cc)\n return ret", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def get_parents_of_class(class_uri, endpoint):\n query = \"\"\"\n select distinct ?c where{\n <%s> rdfs:subClassOf ?c.\n }\n \"\"\" % class_uri\n results = run_query(query=query, endpoint=endpoint)\n classes = [r['c']['value'] for r in results]\n return classes", "def descendant_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n descendants = nx.descendants(self.se.full_class_only_graph,\n self.uri)\n result = restructure_output(self,\n descendants,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def cross_schema_fk_reflection(self):\n return exclusions.closed()", "def classes(self):\r\n return self._classes", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def obj_classes(self) -> ObjClassCollection:\n return self._obj_classes", "def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes", "def models(self):\n return self._base.classes", "def get_classes(self):\n return self._classes", "def get_classes(self):\n return", "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "def classes(self):\n return self._.d", "def getReferencedTypes(self):\n\n raise AbstractMethodException(self.__class__)", "def direct_descendant_type_set(self) -> Set[str]:\n return set(chain.from_iterable(seg.class_types for seg in self.segments))", "def child_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.full_class_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def referencedNamespaces (self):\n return frozenset(self.__referencedNamespaces)", "def get_classes(self, include_ref=True):\n defs = self._get_class_defs()\n ans = {}\n ans.update(defs)\n if include_ref:\n refs = self._get_class_refs()\n ans.update(refs)\n return list(ans.values())", "def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, 
inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi", "def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]", "def list_embedded_metadata_classes():\n return [\n obj\n for name, obj in inspect.getmembers(sys.modules[__name__])\n if inspect.isclass(obj) and issubclass(obj, EmbeddedMetadata) and obj.__module__.startswith(__name__)\n ]", "def _get_parents(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_super_classes(self)", "def relevant_classifications(self):\n return self.relevant_classes", "def FindChilds(self,cls):\n childs = []\n for i in self.classes:\n if self.InheritsFrom(cls,i):\n childs.append(i)\n if childs:\n return childs\n else:\n return False", "def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret", "def getleafsubclasses(cls):\n scls = itersubclasses(cls)\n return [s for s in scls if not s.__subclasses__()]", "def references(self):\n return self._get_related_resources(False)", "def reference_types(self):\n return self.references.keys()", "def reference_types(self):\n return self.references.keys()", "def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")", "def get_non_inheriting_objects(self):\n return get_non_inheriting_objects(self)", "def all(klass):\n return klass.find()", "def classes(self):\n if \"classes\" in self._prop_dict:\n return ClassesCollectionPage(self._prop_dict[\"classes\"])\n else:\n return None", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def referenced_nodes(self):\n return self._referenced_nodes", "def XXXcheck_class_dependencies(self, node):\n # keep track of types which are used by methods arguments\n used_types = {}\n for method in node[\"methods\"]:\n self.check_function_dependencies(method, used_types)\n\n modules = {}\n for typ in used_types.values():\n if typ.f_module:\n for mname, only in typ.f_module.items():\n module = modules.setdefault(mname, {})\n if only: # Empty list means no ONLY clause\n for oname in only:\n module[oname] = True\n\n # Always add C_PTR, needed for class F_derived_member\n modules.setdefault(\"iso_c_binding\", {})[\"C_PTR\"] = True\n\n F_modules = [] # array of tuples ( name, (only1, only2) )\n for mname in sorted(modules):\n F_modules.append((mname, sorted(modules[mname])))\n node.F_module_dependencies = F_modules", "def _unresolvedDependents (self):\n return self.__unresolvedDependents", "def findModuleSchemas(self):\n if self.codebase:\n module = self.codebase.instantiate(self.service_module_name)\n else:\n module = importlib.import_module(self.service_module_name)\n\n res = []\n\n for o in dir(module):\n if isinstance(getattr(module, o), Schema):\n res.append(getattr(module, o))\n\n return res", "def get_subclasses(self, klass: TypeInfo) -> OrderedSet[TypeInfo]:\n if klass not in self._graph:\n return OrderedSet([klass])\n result: OrderedSet[TypeInfo] = OrderedSet(nx.descendants(self._graph, klass))\n result.add(klass)\n 
return result", "def classified_sources(self):\n return self._classified_sources", "def _classesToCheck(self, cls):\r\n yield cls\r\n yield from inspect.getmro(cls)", "def ancestry_iris(self):\n return list(self._class_types)", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def inheritors(cls):\n subclasses = set()\n work = [cls]\n while work:\n parent = work.pop()\n for child in parent.__subclasses__():\n if child not in subclasses:\n subclasses.add(child)\n work.append(child)\n return subclasses", "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def getClasses(self):\n self._process()\n return self._sets", "def descendant_type_set(self) -> Set[str]:\n return set(\n chain.from_iterable(\n seg.descendant_type_set | seg.class_types for seg in self.segments\n )\n )", "def descendants(self):\r\n\r\n descendants = BuildFile.scan_buildfiles(self.root_dir, self.parent_path)\r\n for sibling in self.family():\r\n descendants.discard(sibling)\r\n return descendants", "def inheritors(klass):\n subclasses = set()\n work = [klass]\n while work:\n parent = work.pop()\n for child in parent.__subclasses__():\n if child not in subclasses:\n subclasses.add(child)\n work.append(child)\n return subclasses", "def inheritors(klass):\n subclasses = set()\n work = [klass]\n while work:\n parent = work.pop()\n for child in parent.__subclasses__():\n if child not in subclasses:\n subclasses.add(child)\n work.append(child)\n return subclasses", "def in_collections(self):\n links = []\n for link in self.link:\n if link.rel == PARENT_LINK_REL and link.href:\n links.append(link)\n return links", "def relationships(self):", "def roots(cls, target_type):\r\n return cls._ROOTS_BY_TYPE[target_type]", "def test_get_all_ancestor_types(self):\n pass", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n dependencies.add(reference.ref_cell)\n return dependencies", "def get_orphans(self, course_key):\r\n detached_categories = [name for name, __ in XBlock.load_tagged_classes(\"detached\")]\r\n query = self._course_key_to_son(course_key)\r\n query['_id.category'] = {'$nin': detached_categories}\r\n all_items = self.collection.find(query)\r\n all_reachable = set()\r\n item_locs = set()\r\n for item in all_items:\r\n if item['_id']['category'] != 'course':\r\n # It would be nice to change this method to return UsageKeys instead of the deprecated string.\r\n item_locs.add(\r\n Location._from_deprecated_son(item['_id'], course_key.run).replace(revision=None).to_deprecated_string()\r\n )\r\n all_reachable = all_reachable.union(item.get('definition', {}).get('children', []))\r\n item_locs -= all_reachable\r\n return list(item_locs)", "def find_objs(self, cls, **attr):\n nodes = getattr(self.graph, getattr(models, cls).element_plural).query(**attr).all()\n return nodes", "def _inspect_module(module):\n module_list = getmembers(module, predicate=ismodule)\n classes = getmembers(module, predicate=isclass)\n for (name, cls) in classes:\n if issubclass(cls, db.Model) and not issubclass(cls, Taxonomy):\n if cls is not db.Model:\n _data_classes[name] = cls\n return [mod[1] for mod in module_list]", "def _parts(self):\n return [part for part in Package.__walkparts(self.__relationships)]", "def objects(self, cls):\n for name, info in direct_fields(self.__class__).items():\n if issubclass(cls, 
info.sub_fields[0].type_):\n return getattr(self, name)\n raise TypeError(cls)", "def get_data_classes(self):\n return self._do_request(\"dataclasses\")", "def schemas(self):\n return model.Schemas(self)", "def returns_distinct_classes(self):\n assert simple_class() is not simple_class()", "def fk_associations(cls):\n return cls._fk_associations", "def associatedObjects (self):\n return self.__associatedObjects", "def test_type_builder_handles_all_of_references():\n schema = [\n SchemaObject(\n name=\"ClassWithAllOf\",\n properties=[\n SchemaAllOf(\n name=\"authorValue\",\n all_of=[\n SchemaReference(name=\"\", reference=\"ReferencedObject\"),\n SchemaObject(\n name=\"role\",\n properties=[\n SchemaEnum(\n name=\"\", value_type=\"string\", values=[\"AUTHOR\"]\n ),\n ],\n ),\n ],\n )\n ],\n ),\n SchemaObject(\n name=\"ReferencedObject\",\n properties=[SchemaValue(name=\"stringValue\", value_type=\"string\")],\n ),\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 2\n assert build_result[0] == ClassDefinition(\n name=\"ClassWithAllOf\",\n properties=[\n PropertyDefinition(\n name=\"author_value\",\n key=\"authorValue\",\n value_type=\"ReferencedObject\",\n known_type=False,\n )\n ],\n depends_on={\"ReferencedObject\"},\n )\n assert build_result[1] == ClassDefinition(\n name=\"ReferencedObject\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"stringValue\",\n value_type=\"str\",\n known_type=True,\n )\n ],\n depends_on=set(),\n )", "def get_class_defs(self):\n return list(self._get_class_defs().values())", "def get_bases(self):\n return self.py_class.__bases__", "def descendants(cls):\n return cls.__subclasses__() + \\\n [g for s in cls.__subclasses__() for g in s.descendants()]", "def find(self):\n\n response = self.client.get(Classes.PATH_CLASSES)\n return response", "def getRootIsolatedObjects():\n return frozenset([id for id, obj in getSite().aq_parent.objectItems() if IObjectToIsolate.providedBy(obj)])", "def get_instance_classes():\n return Base_Instance.instance_classes", "def associated_objects(self):\n return self._associated_objects", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def get_type_outside_of(\n self, klasses: OrderedSet[TypeInfo]\n ) -> OrderedSet[TypeInfo]:\n results = OrderedSet(self._types.values())\n for info in klasses:\n results.difference_update(self.get_subclasses(info))\n return results", "def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects", "def get_superclasses(self, klass: TypeInfo) -> OrderedSet[TypeInfo]:\n if klass not in self._graph:\n return OrderedSet([klass])\n result: OrderedSet[TypeInfo] = OrderedSet(nx.ancestors(self._graph, klass))\n result.add(klass)\n return result", "def simple_reflections(self):\n return [s(self) for s in self.parent().simple_reflections()]", "def get_schema_cls() -> t.Any:\n return None", "def get_direct_dependencies(*objs):\n\n deps = set()\n for obj in objs:\n if isinstance(obj, schema.File):\n for enum in obj.enum_list:\n deps.add(enum)\n for interface in obj.interface_list:\n deps.add(interface)\n for struct in obj.struct_list:\n deps.add(struct)\n for trait in obj.trait_list:\n deps.add(trait)\n deps |= get_direct_dependencies(*obj.interface_list)\n deps 
|= get_direct_dependencies(*obj.struct_list)\n deps |= get_direct_dependencies(*obj.resource_list)\n deps |= get_direct_dependencies(*obj.trait_list)\n deps |= get_direct_dependencies(*obj.typespace_list)\n elif isinstance(obj, schema.SchemaObjectList):\n deps |= get_direct_dependencies(*obj)\n elif isinstance(obj, schema.Schema):\n deps |= get_direct_dependencies(*obj.vendor_list)\n elif isinstance(obj, schema.Vendor):\n for enum in obj.enum_list:\n deps.add(enum)\n for interface in obj.interface_list:\n deps.add(interface)\n for struct in obj.struct_list:\n deps.add(struct)\n for trait in obj.trait_list:\n deps.add(trait)\n deps |= get_direct_dependencies(*obj.interface_list)\n deps |= get_direct_dependencies(*obj.struct_list)\n deps |= get_direct_dependencies(*obj.resource_list)\n deps |= get_direct_dependencies(*obj.trait_list)\n deps |= get_direct_dependencies(*obj.typespace_list)\n elif isinstance(obj, schema.Resource):\n for component in obj.component_list:\n deps.add(component.trait)\n for group in obj.group_list:\n deps.add(group)\n elif isinstance(obj, schema.Interface):\n for component in obj.component_list:\n deps.add(component.trait)\n elif isinstance(obj, schema.Typespace):\n for enum in obj.enum_list:\n deps.add(enum)\n for struct in obj.struct_list:\n deps.add(struct)\n deps |= get_direct_dependencies(*obj.struct_list)\n elif isinstance(obj, schema.Trait):\n for command in obj.command_list:\n deps.add(command)\n for enum in obj.enum_list:\n deps.add(enum)\n for event in obj.event_list:\n deps.add(event)\n for struct in obj.struct_list:\n deps.add(struct)\n deps |= get_direct_dependencies(*obj.command_list)\n deps |= get_direct_dependencies(*obj.event_list)\n deps |= get_direct_dependencies(*obj.state_list)\n deps |= get_direct_dependencies(*obj.struct_list)\n elif isinstance(obj, schema.Command):\n if obj.response is not None:\n deps.add(obj.response)\n deps |= get_direct_dependencies(*obj.parameter_list)\n elif isinstance(obj, schema.CommandResponse):\n deps |= get_direct_dependencies(*obj.field_list)\n elif isinstance(obj, schema.Event):\n deps |= get_direct_dependencies(*obj.field_list)\n elif isinstance(obj, schema.Struct):\n deps |= get_direct_dependencies(*obj.field_list)\n elif isinstance(obj, schema.Field):\n if obj.data_type == schema.Field.DataType.ENUM:\n deps.add(obj.enum_type)\n elif obj.data_type == schema.Field.DataType.STRUCT:\n deps.add(obj.struct_type)\n return deps", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for element in self.elements:\n if isinstance(element, CellReference) or isinstance(\n element, CellArray):\n if recursive:\n dependencies.update(\n element.ref_cell.get_dependencies(True))\n dependencies.add(element.ref_cell)\n return dependencies", "def class_exts(cls):\n return set()", "def class_types(self) -> Set[str]:\n # NOTE: This version is simple, but some dependent classes\n # (notably RawSegment) override this with something more\n # custom.\n return self._class_types", "def remove_subclass_objects(cls):\n # get the content type\n cont_type = ContentType.objects.get_for_model(cls)\n \n # return only this cont type\n return cls.objects.filter(content_type=cont_type)", "def deferred(ref):\n module, _ = ref.split(\".\", 1)\n if module in sys.modules:\n return _getcls(ref)\n\n @meta\n def check(cls):\n full_cls_mod = getattr(cls, \"__module__\", None)\n cls_module = full_cls_mod.split(\".\", 1)[0] if full_cls_mod else None\n if cls_module == module:\n return issubclass(cls, _getcls(ref))\n else:\n return False\n\n 
return check", "def dump_class_ref_counts(referrer_depth=2, cutoff=500, rcutoff=1,\r\n ignore=('tuple', 'list', 'function', 'dict',\r\n 'builtin_function_or_method',\r\n 'wrapper_descriptor')):\r\n import gc\r\n __dump_class_ref_counts(gc, referrer_depth, cutoff, rcutoff, ignore)\r\n gc.collect()\r\n plog(\"NOTICE\", \"GC: Done.\")", "def get_class_definitions(cls):\n\n return cls._namespace", "def get_all_classes_defined_in_module(module):\n for _cls in inspect.getmembers(module, inspect.isclass):\n if module.__name__ == _cls[1].__module__:\n yield _cls", "def referencing_nodes(self):\n\n return self._referencing_nodes", "def instance_classes(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_classes\")", "def GetClassBases(self,cls):\n name = \"\"\n for i in cls:\n if i != \")\":\n name+=i\n\n lst = name.split(\"(\")\n cls_lst = lst[-1].split(\",\")\n if cls_lst:\n return cls_lst\n else:\n return False" ]
[ "0.6365546", "0.62089795", "0.6205446", "0.6171531", "0.60406595", "0.59149647", "0.5896615", "0.58919096", "0.5831818", "0.57982343", "0.5780626", "0.5756438", "0.57559365", "0.5708741", "0.5698709", "0.5691668", "0.5679516", "0.56510174", "0.56453377", "0.5609935", "0.558028", "0.5576756", "0.557536", "0.55385894", "0.55295783", "0.5522076", "0.54967463", "0.5466827", "0.543115", "0.54111457", "0.53993434", "0.53993434", "0.5392435", "0.5389922", "0.53855354", "0.53724533", "0.5371052", "0.5371052", "0.5371052", "0.5371052", "0.5371052", "0.5371052", "0.53454995", "0.53411007", "0.5341007", "0.53399664", "0.53364533", "0.5309077", "0.5306081", "0.5295506", "0.5291437", "0.5282669", "0.5275979", "0.521902", "0.521307", "0.52103585", "0.52082014", "0.52082014", "0.5204355", "0.5200354", "0.5196283", "0.51932305", "0.5176379", "0.5164411", "0.5151856", "0.51444846", "0.5141755", "0.5133114", "0.51246035", "0.5120539", "0.5119625", "0.51193374", "0.51168406", "0.5111655", "0.5101062", "0.5088728", "0.50756246", "0.50741065", "0.50634533", "0.5063082", "0.50630647", "0.50630313", "0.50630313", "0.50630176", "0.50452113", "0.50411063", "0.50311655", "0.50301397", "0.5028299", "0.5023616", "0.50212705", "0.501508", "0.5004854", "0.5004581", "0.50035864", "0.50035626", "0.50015485", "0.49981022", "0.49919102", "0.49883524" ]
0.6016841
5
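get_class_refs mirrors get_class_defs over a second dict, and the next record's document merges the two. A minimal sketch of that merge, with plain dicts standing in for _get_class_defs() / _get_class_refs() (the example URIs are illustrative):

def get_classes(defs, refs, include_ref=True):
    # defs/refs stand in for _get_class_defs() / _get_class_refs();
    # matching the original update order (defs first, then refs),
    # a referenced entry wins on a key collision.
    ans = {}
    ans.update(defs)
    if include_ref:
        ans.update(refs)
    return list(ans.values())

defs = {"schema:Person": "Person"}
refs = {"schema:Thing": "Thing"}
print(get_classes(defs, refs))                     # ['Person', 'Thing']
print(get_classes(defs, refs, include_ref=False))  # ['Person']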
get all classes and label them if they are referenced; if include_ref is False, only "defined" classes are included.
def get_classes(self, include_ref=True):\n defs = self._get_class_defs()\n ans = {}\n ans.update(defs)\n if include_ref:\n refs = self._get_class_refs()\n ans.update(refs)\n return list(ans.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_refs(self):\n return list(self._get_class_refs().values())", "def process_class_list(self, module, classes):", "def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)", "def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes", "def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")", "def addClassRef(clazz):\n\n global h_classes\n header = \"class %s;\" % clazz\n if not header in h_classes:\n h_classes.append(header)", "def _class_list(parent, section, objects, refs):\n\n sec = etree.SubElement(parent, section, count=str(len(objects)))\n\n for cls, objs in _class_count(objects):\n obj = etree.SubElement(sec, \"Object\", type=cls, count=str(len(objs)))\n if refs:\n _class_list(obj, \"Referrers\", gc.get_referrers(*objs), False)", "def classes(self):\r\n return self._classes", "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def child_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.full_class_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def get_classes(self):\n return self._classes", "def _load_classes(self):\n\t\t# load class names (name -> label)\n\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\tself.classes \t\t\t\t= {}\n\t\tself.coco_labels \t\t\t= {}\n\t\tself.coco_labels_inverse \t= {}\n\t\tfor c in categories:\n\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\tself.classes[c['name']] = len(self.classes)\n\t\tself.labels = {}\n\t\tfor key, value in self.classes.items():\n\t\t\tself.labels[value] = key\n\n\t\tprint(self.coco_labels)\n\t\tprint(self.coco_labels_inverse)\n\t\tprint(self.classes)\n\t\tprint(self.labels)", "def get_classes(self):\n return", "def dump_class_ref_counts(referrer_depth=2, cutoff=500, rcutoff=1,\r\n ignore=('tuple', 'list', 'function', 'dict',\r\n 'builtin_function_or_method',\r\n 'wrapper_descriptor')):\r\n import gc\r\n __dump_class_ref_counts(gc, referrer_depth, cutoff, rcutoff, ignore)\r\n gc.collect()\r\n plog(\"NOTICE\", \"GC: Done.\")", "def class_labels(self):\n return self._class_labels", "def classes(self):\n if not hasattr(self, '_unique_classes'):\n # build when we don't have\n self._unique_classes = self.data['label'].unique()\n self._unique_classes.sort()\n\n ret = self._unique_classes\n return ret", "def getClasses(self):\n self._process()\n return self._sets", "def descendant_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n descendants = nx.descendants(self.se.full_class_only_graph,\n self.uri)\n result = restructure_output(self,\n descendants,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = {}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = 
c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key", "def classes(self):\n if \"classes\" in self._prop_dict:\n return ClassesCollectionPage(self._prop_dict[\"classes\"])\n else:\n return None", "def getByReferenceClassifiers(inpClassifiers, startExecCount=0):\n\toutVals = [_ByReferenceClassifier(inpClassifier, execCount=startExecCount) for inpClassifier in inpClassifiers]\n\treturn outVals", "def get_classes_conditional(doxy_xml_files, cond):\n found = {}\n for xmlfile in doxy_xml_files:\n xml = lxml.etree.parse(xmlfile)\n classes = xml.xpath('.//compounddef[@kind=\"class\" or @kind=\"struct\"]')\n for cl in classes:\n if cond(cl):\n classname = cl.find('./compoundname')\n baseclasses = cl.xpath('./basecompoundref')\n membervars = cl.xpath('.//memberdef[@kind=\"variable\"]/name')\n\n # An exception: Members get attached to Graph classes\n # through this macro, and is not understood by\n # Doxygen, so we have to parse it outselves.\n graphvars = cl.xpath('.//memberdef[@kind=\"function\"]/name'\n +'[text()=\"INSTALL_GRAPH_PROPERTIES\"]')\n graphmems = []\n\n if len(graphvars)>0:\n r = re.compile('\\(\\(\\w+,\\s*[\\w: ]+,\\s*(\\w+)\\)\\)')\n for g in graphvars:\n for a in g.xpath('../argsstring'):\n graphmems += r.findall(a.text)\n # The INSTALL_GRAPH_PROPERTIES macro also adds a\n # bool called \"dummy\"\n graphmems.append('dummy')\n\n location = cl.find('./location')\n found[classname.text] = (\n {'name': classname.text,\n 'bases': [base.text for base in baseclasses],\n 'members': [mem.text for mem in membervars] + graphmems,\n 'filepath': location.attrib['file'],\n 'line': int(location.attrib['line']),\n 'abstract': cl.xpath('@abstract=\"yes\"'),\n })\n return found", "def classes(self):\n return self._.d", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None", "def relevant_classifications(self):\n return self.relevant_classes", "def get_label_classes(scope, op, node_names=False):\n options = scope.get_options(op, dict(nocl=False))\n if options[\"nocl\"]:\n if len(op.classes_.shape) > 1 and op.classes_.shape[1] > 1:\n raise RuntimeError(\n \"Options 'nocl=True' is not implemented for multi-label \"\n \"classification (class: {}).\".format(op.__class__.__name__)\n )\n classes = np.arange(0, len(op.classes_))\n elif node_names:\n try:\n options = scope.get_options(op, dict(zipmap=False))\n zipcol = options[\"zipmap\"] == \"columns\"\n except NameError:\n zipcol = False\n if zipcol:\n clnames = op.classes_.ravel()\n if np.issubdtype(clnames.dtype, np.integer) or clnames.dtype == np.bool_:\n classes = np.array([\"i%d\" % c for c in clnames])\n else:\n classes = np.array([\"s%s\" % c for c in clnames])\n else:\n classes = op.classes_\n elif hasattr(op, \"classes_\"):\n classes = op.classes_\n elif hasattr(op, \"intercept_\"):\n classes = len(op.intercept_)\n elif hasattr(op, \"y_\"):\n # 
_ConstantPredictor\n classes = np.array(list(sorted(set(op.y_))))\n else:\n raise RuntimeError(\n \"No known ways to retrieve the number of classes for class %r.\"\n \"\" % type(op)\n )\n return classes", "def import_all_known_classes(debug=False):\r\n\r\n output = {}\r\n for cls in KnownClass.objects:\r\n if debug:\r\n print \"Importing %s.%s\"%(cls.module_name, cls.class_name)\r\n x = get_class(cls.module_name, cls.class_name)\r\n output[(cls.module_name, cls.class_name)] = x()\r\n return output", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def local_classes(self, classnames, typesets=frozenset(['cy', 'py'])):\n saved = {}\n for name in classnames:\n if 'c' in typesets and name in self.cython_ctypes:\n saved[name, 'c'] = _undot_class_name(name, self.cython_ctypes)\n if 'cy' in typesets and name in self.cython_cytypes:\n saved[name, 'cy'] = _undot_class_name(name, self.cython_cytypes)\n if 'py' in typesets and name in self.cython_pytypes:\n saved[name, 'py'] = _undot_class_name(name, self.cython_pytypes)\n self.clearmemo()\n yield\n for name in classnames:\n if 'c' in typesets and name in self.cython_ctypes:\n _redot_class_name(name, self.cython_ctypes, saved[name, 'c'])\n if 'cy' in typesets and name in self.cython_cytypes:\n _redot_class_name(name, self.cython_cytypes, saved[name, 'cy'])\n if 'py' in typesets and name in self.cython_pytypes:\n _redot_class_name(name, self.cython_pytypes, saved[name, 'py'])\n self.clearmemo()", "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "def get_class_defs(self):\n return list(self._get_class_defs().values())", "def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]", "def get_labels_and_classes(self):\n query = read_query('structure exploration/labels_and_classes')\n response = self._submit_query(query)\n\n temp = dict()\n for r in response:\n temp[r['l']['value']] = r['type']['value'].split('/')[-1]\n\n return temp", "def classes_from_headers(all_headers, include_paths):\n import os, os.path, tempfile, shutil\n classes = []\n try:\n d = tempfile.mkdtemp()\n hpp = os.path.join(d, 'headers.hpp')\n compiled = os.path.join(d, 'out.cpp')\n with open(hpp,'w') as h:\n [print('#include \"%s\"'%i, file=h) for i in all_headers]\n cxx = 'g++'\n if 'CXX' in os.environ:\n cxx = os.environ['CXX']\n cmd = [cxx, '-o', compiled, '-E']\n for i in include_paths:\n cmd += ['-I', i]\n cmd.append(hpp)\n # print(' '.join(cmd))\n os.system(' '.join(cmd))\n with open(compiled, 'r') as out:\n for line in out:\n words = line.split()\n if len(words)>=2 and (words[0]=='class' or words[0]=='struct'):\n classes.append(words[1].strip(':'))\n return classes\n finally:\n shutil.rmtree(d)", "def _get_gen_classes(self, bgc_like, gcf_as_cutoff=0.5):\n # assess if bgc or gcf\n is_bgc = isinstance(bgc_like, BGC)\n if is_bgc:\n # get parent gcf for bgc\n bgc_like_gcf = [\n gcf for gcf in self.npl.gcfs\n if bgc_like.bgc_id in [b.bgc_id for b in gcf.bgcs]\n ][0]\n # gather AS classes and convert to names in scoring dict\n as_classes = 
self.npl.class_matches.convert_as_classes(\n bgc_like.product_prediction.split(\".\"))\n bgc_like_classes_dict = {\n \"bigscape_class\": bgc_like_gcf.bigscape_class,\n # str - always one bigscape class right?\n \"as_classes\": as_classes\n } # list(str)\n else:\n as_classes = self.npl.class_matches.convert_as_classes(\n self.npl.class_matches.get_gcf_as_classes(\n bgc_like, gcf_as_cutoff))\n bgc_like_classes_dict = {\n \"bigscape_class\": bgc_like.bigscape_class,\n # str - always one bigscape class right?\n \"as_classes\": as_classes\n } # list(str)\n return bgc_like_classes_dict", "def classes(self) -> List[Any]:\n return list(self.label_counts.keys())", "def class_names(self):\n raise NotImplementedError", "def wrap_simple_classes(self):\n # Base ref-counted classes are abstract because it is necessary to set up reference counting.\n # Wrapper classes do that in their public constructor.\n klasses = []\n for struct in self.repo.structs.values():\n if struct.is_class and struct.c_name not in self.base_classes:\n wrapped_name = struct.vala_name + \"Ref\"\n wrapped_c_name = 'Cef' + wrapped_name\n members = [\n StructMember(\"GData*\", \"private_data\", \"private_data\"),\n StructMember(\"volatile int\", \"ref_count\", \"ref_count\")\n ]\n\n # Vala definition\n klass = Struct(\n c_name=wrapped_c_name,\n vala_name=wrapped_name,\n c_header=\"valacef_api.h\",\n members=members)\n klass.set_parent(struct)\n klass.set_is_class(True)\n construct = Function(\n c_name=wrapped_c_name + \"New\",\n vala_name=wrapped_name,\n c_header=\"valacef_api.h\")\n construct.construct = True\n klass.add_method(construct)\n\n priv_set = Function(\n c_name=wrapped_c_name + \"PrivSet\",\n vala_name=\"priv_set\",\n c_header=\"valacef_api.h\",\n params=[\n (\"const char*\", \"key\"),\n (\"T\", \"data\"),\n ],\n vala_generics=[\"T\"],\n vala_simple_generics=True\n )\n klass.add_method(priv_set)\n priv_get = Function(\n c_name=wrapped_c_name + \"PrivGet\",\n vala_name=\"priv_get\",\n c_header=\"valacef_api.h\",\n params=[\n (\"const char*\", \"key\"),\n ],\n ret_type=\"T\",\n vala_generics=[\"T\"],\n vala_simple_generics=True\n )\n klass.add_method(priv_get)\n klass.add_method(Function(\n c_name=wrapped_c_name + \"PrivDel\",\n vala_name=\"priv_del\",\n c_header=\"valacef_api.h\",\n params=[\n (\"const char*\", \"key\"),\n ],\n ))\n\n klasses.append(klass)\n\n # C definition\n c_klass = Struct(\n c_name=wrapped_c_name,\n vala_name=wrapped_name,\n c_header=\"stdlib.h;capi/cef_base_capi.h\",\n members=members)\n c_klass.set_parent(struct)\n c_klass.set_is_class(True)\n construct = Function(wrapped_c_name + \"New\", wrapped_name, \"\", wrapped_c_name + '*', body=[\n '%s* self = (%s*) calloc(1, sizeof(%s));' % (wrapped_c_name, wrapped_c_name, wrapped_c_name),\n '%s((void*) self, sizeof(%s), sizeof(%s));' % (\n 'cef_base_ref_counted_init_ref_counting', struct.c_name, wrapped_c_name),\n 'g_datalist_init(&(self->private_data));',\n 'return self;'\n ])\n construct.construct = True\n c_klass.add_method(construct)\n\n priv_set = Function(wrapped_c_name + \"PrivSet\", \"priv_set\", \"\", params=[\n (wrapped_c_name + \"*\", \"self\"),\n (\"const char*\", \"key\"),\n (\"void*\", \"data\"),\n ('GDestroyNotify', 'destroy'),\n ],\n body=[\n 'g_assert (self != NULL);',\n 'g_assert (key != NULL);',\n 'g_datalist_id_set_data_full(',\n '&self->private_data, g_quark_from_string(key), data, data ? 
destroy : (GDestroyNotify) NULL);',\n ])\n c_klass.add_method(priv_set)\n priv_get = Function(wrapped_c_name + \"PrivGet\", \"priv_get\", \"\", params=[\n (wrapped_c_name + \"*\", \"self\"),\n (\"const char*\", \"key\"),\n ],\n ret_type=\"void*\",\n body=[\n 'g_assert (self != NULL);',\n 'g_assert (key != NULL);',\n 'return g_datalist_get_data(&self->private_data, key);',\n ])\n c_klass.add_method(priv_get)\n c_klass.add_method(Function(wrapped_c_name + \"PrivDel\", \"priv_del\", \"\", params=[\n (wrapped_c_name + \"*\", \"self\"),\n (\"const char*\", \"key\"),\n ],\n body=[\n 'g_return_if_fail (self != NULL);',\n 'g_return_if_fail (key != NULL);',\n 'g_datalist_remove_data(&self->private_data, key);',\n ]))\n self.add_c_glue(c_klass)\n\n self.repo.add_struct(*klasses)", "def getClasses(bal: \"BKAlignedLayout\"):\n classes = defaultdict(list)\n\n # We need to enumerate all block roots\n roots = set(bal.root)\n for root in roots:\n if root is None:\n print(\"There are no classes in a balanced layout.\")\n break\n\n sink = bal.sink[root]\n classContents = classes[sink]\n classContents.append(root)\n\n return classes", "def resolve_base_classes(classes):\n for cl in classes.values():\n resolved = []\n for base in cl['bases']:\n if base in classes:\n resolved.append(base)\n cl['resolved_bases'] = resolved", "def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi", "def find(self):\n\n response = self.client.get(Classes.PATH_CLASSES)\n return response", "def obj_classes(self) -> ObjClassCollection:\n return self._obj_classes", "def parent_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n root_node = list(nx.topological_sort(self.se.full_class_only_graph))\n # When a schema is not a tree with only one root node\n # Set \"Thing\" as the root node by default\n if 'http://schema.org/Thing' in root_node:\n root_node = 'http://schema.org/Thing'\n else:\n root_node = root_node[0]\n paths = nx.all_simple_paths(self.se.full_class_only_graph,\n source=root_node,\n target=self.uri)\n paths = [_path[:-1] for _path in paths]\n result = restructure_output(self,\n paths,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def class_exts(cls):\n return set()", "def label_all(self):\n\t\tlabels_basic = self.dependency_labels()\n\t\tlabels = Labels(labels_basic)\n\t\treturn labels.label_most()", "def get_classes_related_to_activity(self, activity_name, java_files, classes, related_classes=list()):\r\n activity_file = None\r\n for file in java_files:\r\n if file.endswith(\"%s.java\" % activity_name):\r\n activity_file = file\r\n break\r\n class_content = CommonMethods.read_file(activity_file)\r\n classes_in_activity = [class_name for class_name in classes if class_name in class_content]\r\n for class_name in classes_in_activity:\r\n if class_name not in related_classes:\r\n related_classes.extend(self.get_classes_related_to_activity(activity_name=class_name,\r\n java_files=java_files, classes=classes,\r\n related_classes=classes_in_activity))\r\n related_classes.extend(classes_in_activity)\r\n related_classes = list(dict.fromkeys(related_classes))\r\n return 
related_classes", "def update_class_records():\n logger.info('Fetching class ids and labels for '\n 'classes with direct instances')\n timestamp = statistics.get_current_timestamp()\n results = sparql.sparql_query(queries.QUERY_CLASSES,\n fallback=queries.QUERY_CLASSSES_FALLBACK)\n\n if 'error' in results:\n logger.error('Got no SPARQL results.')\n return\n\n updated = {}\n\n def value(binding, key):\n return binding[key]['value']\n\n for binding in results['results']['bindings']:\n uri = value(binding, 'cl')\n if not sparql.is_wikidata_entity(uri):\n continue\n\n qid = sparql.wikidata_entity_id(uri)\n label = value(binding, 'clLabel')\n record = {'l': label, }\n\n if 'c' in binding:\n record['i'] = int(value(binding, 'c'))\n updated[qid] = record\n\n logger.info('Augmenting current classes data ...')\n data = statistics.get_json_data('classes')\n merged = statistics.merge(data, updated, default_others={'i': 0})\n statistics.update_json_data('classes', merged, timestamp)", "def get_svg_classes(self):\n ld = set()\n lt = set()\n for element in itertools.chain(self.polygons, self.paths):\n ld.update(zip(element.layers, element.datatypes))\n for label in self.labels:\n lt.add((label.layer, label.texttype))\n for reference in self.references:\n ref_cell = reference.ref_cell\n if isinstance(ref_cell, Cell):\n ref = ref_cell.get_svg_classes()\n ld.update(ref[0])\n lt.update(ref[1])\n return ld, lt", "def getFeatureClassNames(self):\n return self.featureClasses.keys()", "def classes(class_name):\r\n\td = {}\r\n\tfor k, v in class_name.__dict__.items():\r\n\t\tif not (k.startswith('__') and k.endswith('__')):\r\n\t\t\td[k] = v\r\n\treturn d", "def classes(self):\n return str(self._classes)", "def getclasstree(classes, unique=0):\r\n children = {}\r\n roots = []\r\n for c in classes:\r\n if c.__bases__:\r\n for parent in c.__bases__:\r\n if not parent in children:\r\n children[parent] = []\r\n children[parent].append(c)\r\n if unique and parent in classes: break\r\n elif c not in roots:\r\n roots.append(c)\r\n for parent in children:\r\n if parent not in classes:\r\n roots.append(parent)\r\n return walktree(roots, children, None)", "def classes(self):\n return list(self._classes_generator())", "def gen_extractor_classes():\n from .extractors import _ALL_CLASSES\n\n return _ALL_CLASSES", "def get_classes(self):\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n out_classes += (c[:2],)\n\n return ((self.outside_class, self.outside_class_display),) + out_classes", "def get_subclasses(self, class_name):\n return class_name.__subclasses__()", "def list_all_classes(self):\n classes = list(self.extended_class_only_graph.nodes())\n classes = [SchemaClass(_cls, self) for _cls in classes]\n return classes", "def classes(self):\n return self.browser.classes(self)", "def constructClassTable(G, classes):\n res = dict((c, set()) for c in classes)\n for v, data in G.nodes(data=True):\n c = data['class']\n if c in classes:\n res[c].add(v)\n return res", "def DCC_class_level(self, class_entity: und.Ent):\n others = list()\n if \"Interface\" in class_entity.kindname():\n return 0\n\n for ref in class_entity.refs(\"Define\", \"Variable\"):\n if ref.ent().type() in self.all_classes:\n others.append(ref.ent().type())\n\n kind_filter = \"Method ~Unknown ~Jar ~Library ~Constructor ~Implicit ~Lambda ~External\"\n for ref in class_entity.refs(\"Define\", kind_filter):\n for ref2 in ref.ent().refs(\"Java Define\", \"Java Parameter\"):\n if ref2.ent().type() in 
self.all_classes:\n others.append(ref2.ent().type())\n\n for ref in class_entity.refs(\"Define\", kind_filter):\n for ref2 in ref.ent().refs(\"Java Use Return\"):\n if ref2.ent().type() in self.all_classes:\n others.append(ref2.ent().type())\n\n return len(set(others))", "def __get_available_classes(self, package):\n modules = []\n classes = []\n \n # List through the modules in the specified package, ignoring __init__.py, and append them to a list.\n for f in os.listdir(package):\n if f.endswith('.py') and not f.startswith('__init__'):\n modules.append('{0}.{1}'.format(package, os.path.splitext(f)[0]))\n \n module_references = []\n \n # Attempt to import each module in turn so we can access its classes\n for module in modules:\n module_references.append(importlib.import_module(module))\n \n # Now loop through each module, looking at the classes within it - and then append each class to a list of valid classes.\n for module in module_references:\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj):\n classes.append((obj.__name__, obj))\n \n return classes", "def _classesToCheck(self, cls):\r\n yield cls\r\n yield from inspect.getmro(cls)", "def get_best_references(self, header, include=None):\n if include is not None and self.filekind not in include:\n raise crexc.CrdsUnknownReftypeError(self.__class__.__name__, repr(self.basename),\n \"can only compute bestrefs for type\", repr(self.filekind), \"not\", include)\n bestref = self.get_best_ref(header)\n if bestref is not None:\n return { self.filekind : self.get_best_ref(header) }\n else:\n return {}", "def get_subclasses(cls, include_parents=False):\n subclasses = dict()\n for child in cls.__subclasses__():\n grandchildren = child.get_subclasses(include_parents)\n subclasses.update(grandchildren)\n if include_parents or not grandchildren:\n subclasses[child.__name__] = child\n\n return subclasses", "def get_all_lr_classes():\n lr_classes = {}\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isclass(obj) and name != 'ABC':\n lr_classes[name] = obj\n return lr_classes", "def discover_classes(\n package,\n cls_match_func=trivial,\n module_match_func=trivial,\n):\n for module in discover_modules(package, module_match_func):\n # Check all the classes in that module\n for _, imported_class in inspect.getmembers(module, inspect.isclass):\n # Don't include things that are only there due to a side-effect of\n # importing\n if imported_class.__module__ != module.__name__:\n continue\n\n if cls_match_func(imported_class):\n yield imported_class", "def FindChilds(self,cls):\n childs = []\n for i in self.classes:\n if self.InheritsFrom(cls,i):\n childs.append(i)\n if childs:\n return childs\n else:\n return False", "def get_classifications(self) -> list:\n return self.client.classifications.get_all()", "def getWordClasses(self):\n it = self._call_java('getWordClasses').toIterator()\n result = []\n while (it.hasNext()):\n result.append(it.next().toString())\n return result", "def get_class_list(self):\r\n modules = []\r\n classes = []\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n measures_path = os.path.join(path, 'measures')\r\n package_path = 'ruler.measures'\r\n\r\n # List through the modules in the specified package, ignoring __init__.py, and append them to a list.\r\n for f in os.listdir(measures_path):\r\n if f.endswith('.py') and not f.startswith('__init__'):\r\n modules.append('{0}.{1}'.format(package_path, os.path.splitext(f)[0]))\r\n\r\n module_references = []\r\n\r\n # Attempt to import 
each module in turn so we can access its classes\r\n for module in modules:\r\n module_references.append(importlib.import_module(module))\r\n\r\n # Now loop through each module, looking at the classes within it -\r\n # and then append each class to a list of valid classes.\r\n for module in module_references:\r\n for name, obj in inspect.getmembers(module):\r\n if inspect.isclass(obj):\r\n classes.append((obj.__name__, obj))\r\n\r\n return classes", "def deferred(ref):\n module, _ = ref.split(\".\", 1)\n if module in sys.modules:\n return _getcls(ref)\n\n @meta\n def check(cls):\n full_cls_mod = getattr(cls, \"__module__\", None)\n cls_module = full_cls_mod.split(\".\", 1)[0] if full_cls_mod else None\n if cls_module == module:\n return issubclass(cls, _getcls(ref))\n else:\n return False\n\n return check", "def visit_ClassDef(self, node):\n if node in self.manager.found_classes:\n return\n\n self.manager.found_classes.add(node)\n self.manager.found[\"classes\"].append({\"name\":node.name,\n \"lineno\":node.lineno,\n \"namespace\":\".\".join(self.parent)})\n\n # Keep checking all nodes in this class.\n for my_node in node.body:\n self.manager._explorer(self.manager, self.parent + [node.name]).visit(my_node)", "def coerce_class_names(classes):\n return [getattr(val, 'registry_name', val) for val in classes] \\\n if hasattr(classes, '__iter__') and not isinstance(classes, str) \\\n else getattr(classes, 'registry_name', classes)", "def class_types(self) -> Set[str]:\n # NOTE: This version is simple, but some dependent classes\n # (notably RawSegment) override this with something more\n # custom.\n return self._class_types", "def classes_from_build_path(build_path, targets):\n doxy_xml_path = os.path.join(build_path,'docs/build/doxygen/xml4rst')\n if not os.path.exists(doxy_xml_path):\n print('%s: Error, path \"%s\" does not exist.'%(sys.argv[0], doxy_xml_path))\n sys.exit(1)\n\n doxy_xml_files = []\n for component in targets:\n doxypath = os.path.join(doxy_xml_path, component)\n doxy_xml_files += glob(os.path.join(doxypath, 'class*.xml'))\n doxy_xml_files += glob(os.path.join(doxypath, 'struct*.xml'))\n\n # We want only classes that contain calls to the\n # ACCEPT_SERIALIZATION macro.\n serializable = './/memberdef/name[text()=\"ACCEPT_SERIALIZATION\"]'\n\n def pred(x):\n return len(x.xpath(serializable)) > 0\n\n return get_classes_conditional(doxy_xml_files, pred)", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def conjugacy_classes(self):\n identity = _af_new(list(range(self.degree)))\n known_elements = {identity}\n classes = [known_elements.copy()]\n\n for x in self.generate():\n if x not in known_elements:\n new_class = self.conjugacy_class(x)\n classes.append(new_class)\n known_elements.update(new_class)\n\n return classes", "def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' 
+ module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def get_field_class_with_validators(field, classes, include=None):\n\n if not include:\n include = []\n\n classes_list = include\n classes_list.extend(get_validators_for_field(field))\n if classes:\n if isinstance(classes, basestring):\n classes_list.extend(classes.split(' '))\n else:\n for c in classes:\n if isinstance(c, basestring):\n cs = c.split(' ')\n else:\n cs = c\n classes_list.extend(cs)\n return ' class=\"%s\"'%' '.join(classes_list)", "def get_labels(self, depth=None, set_transform=False):\n labels = libcopy.deepcopy(self.labels)\n if depth is None or depth > 0:\n for reference in self.references:\n if depth is None:\n next_depth = None\n else:\n next_depth = depth - 1\n labels.extend(reference.get_labels(next_depth, set_transform))\n return labels", "def collect_references(self):\n raise NotImplementedError('collect_references must be implemented in ASTSpriteGroup-subclass {:r}'.format(type(self)))", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")" ]
[ "0.6035254", "0.58814853", "0.5880051", "0.5856091", "0.57512456", "0.5681698", "0.5672593", "0.5591788", "0.5589396", "0.55738693", "0.55246097", "0.55189526", "0.5512281", "0.5482113", "0.54583037", "0.5448857", "0.54194885", "0.5409159", "0.53911316", "0.5388025", "0.53866357", "0.5350316", "0.5342173", "0.5330371", "0.5297328", "0.52816486", "0.5261616", "0.5258449", "0.52572185", "0.52572185", "0.52572185", "0.52572185", "0.52572185", "0.52572185", "0.52465504", "0.5245814", "0.524575", "0.5203037", "0.52009094", "0.51889706", "0.51527137", "0.5134545", "0.5129686", "0.51295006", "0.51294494", "0.5122093", "0.51148385", "0.5110517", "0.51033276", "0.5091925", "0.50844043", "0.50844043", "0.5051491", "0.5049002", "0.50388426", "0.5037746", "0.50361997", "0.50265574", "0.5026331", "0.5000983", "0.499073", "0.49872983", "0.49808827", "0.49802342", "0.49687704", "0.4959988", "0.49426475", "0.49309427", "0.49061412", "0.49057284", "0.48817012", "0.48747504", "0.48561844", "0.48487142", "0.4840297", "0.48357224", "0.48349956", "0.4828984", "0.48282388", "0.4825774", "0.48167416", "0.48109758", "0.4806602", "0.4805839", "0.47977936", "0.4797152", "0.47968107", "0.4794433", "0.47914717", "0.47855967", "0.47851813", "0.47851813", "0.47851813", "0.47851813", "0.47851813", "0.47851813", "0.47851813", "0.47851813", "0.47851813", "0.47851813" ]
0.7381208
0
return True if there is at least one validation error.
def has_validation_error(self): for err in self._schema.validator.validation_errors: if not err.warning: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isValid(self):\n errorList = self.getErrors()\n\n return not errorList", "def is_valid(self):\n return not self.errors", "def is_valid(self):\n self.clean()\n return not bool(self.errors)", "def has_errors(self):\n return len(self.get_errors()) > 0", "def has_errors(self) -> bool:\n return len(self.errors) > 0", "def is_valid(self) -> bool:\n return len(self.validate()) == 0", "def has_errors(self):\n\n return True if len(self.errors) > 0 else False", "def is_valid(self) -> bool:\n return self.errors == \"\"", "def has_errors(self) -> bool:\n if self.errors:\n return True\n return False", "def hasErrors(self):\n return False", "def has_errors(self) -> bool:", "def is_valid(self):\n self.errors = {}\n self._process_data()\n self._validate_changes()\n return not self.errors", "def validate(self):\n\n if self.validate_all_fields():\n return True\n return False", "def valid(self):\n return len(self.missing()) == 0", "def has_any(self) -> bool:\n if len(self.failures) == 1:\n return self.failures[0] != \"\"\n\n return len(self.failures) > 1", "def has_validation(self):\n return not self.get_node('//Validation') is None", "def isValid(self):\n def _isValid(obj):\n return obj.errors.isEmpty()\n return self.validate().addCallback(_isValid)", "def is_valid(self):\n return self.has_valid_values() and self.has_valid_sum()", "def has_errors(self):\n return self.exc_info != None", "def is_valid(self):\n try:\n self.validate()\n return True\n except (TypeError, ValueError) as e:\n return False", "def hasError(self) -> bool:\n return self.errorCode is not None and len(self.errorCode) > 0", "def has_failures_or_errors(self):\r\n return (self._num_failures() > 0) or (self._num_script_errors() > 0)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n if username_exists(cur, self.username.data):\n self.username.errors.append('This username already exists!')\n return False\n\n return True", "def isValid(self):\n return _libsbml.XMLError_isValid(self)", "def valid(self):\r\n if self.file_exists and len(self.missing_columns) == 0 and len(self.veg_columns) > 0 and \\\r\n len(self.lat_errors) == 0 and len(self.lon_errors) == 0 and len(self.time_errors) == 0 and len(self.date_errors) == 0:\r\n return True\r\n else:\r\n return False", "def valid(self):\n return len(self._totals_) <= 1", "def is_valid(self) -> bool:\n if self._validated is None:\n return self.validate(raise_error=False)\n return self._validated", "def is_valid(self):\n# import ipdb; ipdb.set_trace()\n return self.is_bound and not bool(self.errors)", "def has_error(self) -> bool:\n return self._has_error", "def valid(self):\n if not self._runs:\n return False\n return all([r.valid for r in self._runs])", "def is_valid(self) -> bool:\n return \\\n (self.spatial is None or all([v(self.spatial)\n for v, _ in self.spatial_validations])) \\\n and \\\n (self.temporal is None or all([v(self.temporal)\n for v, _ in self.temporal_validations]))", "def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False", "def has_error(self):\n return self.error_found", "def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n 
return True", "def is_valid(self, data_model: DataModel) -> bool:\n return all(constraint.is_valid(data_model) for constraint in self.constraints)", "def is_valid(self, data_model: DataModel) -> bool:\n return all(constraint.is_valid(data_model) for constraint in self.constraints)", "def is_error(self) -> bool:\n return not self.get_error()", "def is_error(self) -> bool:\n return not self.get_error()", "def failed(self):\n return len(self.failed_outputs) > 0 or len(self.errors) > 0", "def checkAnalysis(self) -> bool:\n\n if len(self.materials) == 0:\n raise AnalysisError('No material models have been assigned to the analysis')\n\n for material in self.materials:\n if not material.isValid():\n raise AnalysisError('Material ({:s}) is not valid'.format(material.name))\n\n\n return True", "def is_valid(self, raise_exception=False) -> bool:\n super().is_valid(raise_exception=raise_exception)\n if not self.initial_data.get('data') and \\\n (not self.initial_data.get('batch_id')\n or (self.initial_data.get('batch_id')\n and not self.initial_data.get('eob'))):\n self._errors = {\n 'data': 'data has to be provided if batch_id is not provided'\n 'or batch_id is provided and eob is False'\n }\n\n if self._errors and raise_exception:\n raise serializers.ValidationError(self.errors)\n return not bool(self._errors)", "def has_failed(self):\n return self._error is not None", "def isValid(self):\n return self.valid", "def HasErrors(self):\n for name in self._GetStreamNames():\n if name.startswith('error_data.'):\n return True\n\n return False", "def has_errors( self ) :\n for e in self._errlist :\n if e.svr in (starobj.Error.CRIT, starobj.Error.ERR) :\n return True\n return False", "def check_validation(self):\n validation = self.validation\n logger = self.logger\n if validation is not None:\n internal_pass = validation['internal_pass']\n baseline_pass = validation['baseline_pass']\n\n both_pass = True\n if internal_pass is not None and not internal_pass:\n logger.error('Comparison failed between files within the test '\n 'case.')\n both_pass = False\n\n if baseline_pass is not None and not baseline_pass:\n logger.error('Comparison failed between the test case and the '\n 'baseline.')\n both_pass = False\n\n if both_pass:\n raise ValueError('Comparison failed, see above.')", "def have_error(self):\n return (hasattr(self, \"got_error\") and\n self.got_error)", "def is_valid(self, data_model: DataModel) -> bool:\n if data_model is None:\n return True\n\n return all(c.is_valid(data_model) for c in self.constraints)", "def is_valid(self):\n return self.scenario.is_valid()", "def _check_error(self):\n\n if self.error_code_test != 0:\n return False\n else:\n return True", "def valid(self) -> bool:\n pass", "def valid(self) -> bool:\n are_populated = [bool(getattr(self, fld_nm)) for fld_nm in self.necessary_fields]\n return all(are_populated)", "def valid(self) -> bool:\n return True", "def isValid(self):\n return self._valid", "def clean(self):\n super(RequireOneFormSet, self).clean()\n for error in self.errors:\n if error:\n return\n completed = 0\n for cleaned_data in self.cleaned_data:\n # form has data and we aren't deleting it.\n if cleaned_data and not cleaned_data.get('DELETE', False):\n completed += 1\n\n if completed < 1:\n raise forms.ValidationError(\"At least one %s is required.\" %\n self.model._meta.object_name.lower())", "def IsValid(self):\n return False", "def has_errors_fatal(self) -> bool:\n return len(self.errors_fatal) > 0", "def valid(self):\n\t\tfor k, v in self.rules.items():\n\t\t\tfor 
i in v:\n\t\t\t\tif any([self.valid_rule_1(i), self.valid_rule_2(i), self.valid_rule_3(k, i)]):\n\t\t\t\t\t# print(\"Got a pass\")\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\t# print(\"Got a fail\")\n\t\t\t\t\treturn False\n\t\t# print(\"CORRECT CFG\")\n\t\treturn True", "def validate(self, data):\n return any(imap(lambda validator: validate_common(validator, data), self.validators))", "def isValid(self):\n return self.file_name != \"\" and self.line_number != 0", "def check_for_validator_error(\n errors: Tuple[str, ...], validators: Tuple[ANY_VALIDATOR, ...] = ALL_VALIDATORS\n) -> bool:\n return any(validator.ERROR in errors for validator in validators)", "def IsValid(self):\r\n \r\n return self._valid", "def IsValid(self):\r\n \r\n return self._valid", "def check_single_excitation(self):\n\n if len(self.exc_dict.keys()) != 1:\n return False\n for key in self.exc_dict.keys():\n if len(self.exc_dict[key]) != 1:\n return False\n return True", "def valid(self):\n if (self._npix == []\n or self._gpix == []\n or self._epix == []\n or self._ppix == []) :\n return False\n return True", "def validate_all_fields(self):\n\n if self.validate_byr() and \\\n self.validate_iyr() and \\\n self.validate_eyr() and \\\n self.validate_hgt() and \\\n self.validate_hcl() and \\\n self.validate_ecl() and \\\n self.validate_pid() and \\\n self.validate_cid():\n return True\n return False", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if not email_exists(cur, self.email.data):\n self.email.errors.append('Please check your email address.')\n return False\n\n return True", "def validate(self) -> bool:\n required = self.crud.validate(required=True)\n if required:\n raise ValueError(\n f\"Validation error. Required destination fields are not present in the crosswalk: {required}\"\n )", "def is_valid(self) -> bool:\n if self.total <= 1:\n # Definitely valid (i.e. no conflict) if 0 or 1. In practice, this\n # function probably won't be called if there are 0 fixes, but 0 is\n # valid; it simply means \"no fixes to apply\".\n return True\n if self.total == 2:\n # This is only OK for this special case. We allow this because\n # the intent is clear (i.e. 
no conflict): Insert something *before*\n # the segment and something else *after* the segment.\n return self.create_before == 1 and self.create_after == 1\n # Definitely bad if > 2.\n return False # pragma: no cover", "def is_valid(self, data_model: DataModel) -> bool:\n return self.constraint.is_valid(data_model)", "def validate(self):\n if self.id == None:\n return False\n return self.validator.validate(self.fields)", "def has_validation_docs(self):\n pass", "def __bool__(self):\n return not self.err", "def valid(self, order=None):\n return self.is_valid", "def is_valid(self):\n return self._is_valid", "def check_errors(self) -> None:", "def validate(self):\n return self.validator.validate(self.fields)", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def succeed(self) -> bool:\n return self.errorCode is None or len(self.errorCode) < 1", "def validate(self):\n return self._validate_variable(self._value_forced) and \\\n self._validate_variable(self._value_calc)", "def is_valid(self):\n num_lines = len(self.lines)\n if self.statement.conclusion != self.lines[num_lines-1].conclusion:\n return False\n for line_num in range(num_lines):\n line = self.lines[line_num]\n if line.rule is None:\n if line.conclusion not in self.statement.assumptions:\n return False\n if (line.rule is not None and not self.instance_for_line(line_num).is_instance_of(self.rules[line.rule])) \\\n or (line.justification is not None and any(i >= line_num for i in line.justification)):\n return False\n\n return True", "def checkValid(self):\n if (self.noteName is not None) and (self.accidental is not None) and (self.octave is not None):\n return True\n else:\n return False", "def validate(self):\n for field in self.fields:\n if field.validate():\n self.model.set(field.name, field.model_value)\n else:\n self.errors.append(field.error())\n return len(self.errors) == 0", "def has_error(self):\n return self.status == 'OK'", "def check(self):\n\n if not self.target.ok():\n return False\n\n if not self.progid.ok():\n return False\n\n if not self.prinapp.ok():\n return False\n\n if not self.observers.ok():\n return False\n\n return True", "def validate(self):\n validated = True \n # Check that all parameters exist in the self.parameters dictionary\n for param_name in self._SCALAR_PARAMETERS:\n if param_name not in self.parameters:\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False \n \n for param_name in self._TABLE_PARAMETERS:\n if not all([elem for elem in self.parameters[param_name]]):\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False\n \n return validated", "def _get_validation_status(self) -> bool:\n\n try:\n for x in self.make_query(\"trust-anchors/statuses\"):\n if x[\"completedValidation\"] is False:\n # If anything has not been validated return false\n return False\n # All are validated. 
Return true\n return True\n except urllib.error.URLError as e:\n self._wait(60, \"Connection was refused\")\n return False", "def is_valid(self):\n for lineedit in self.lineedits:\n if lineedit in self.validate_data and lineedit.isEnabled():\n validator, invalid_msg = self.validate_data[lineedit]\n text = to_text_string(lineedit.text())\n if not validator(text):\n QMessageBox.critical(self, self.get_name(),\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\n QMessageBox.Ok)\n return False\n return True", "def is_valid(self) -> bool:\n if not self.list_path:\n raise ValueError(\"Data must be loaded before validation\")\n\n return self._check()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def validate(self):\n\n\tmissing = []\n\tbadcheck = []\n\tfor name, checkfunc, params in self._required:\n\t try:\n\t\targ = self.make_required(name)\n\t\tif checkfunc is not None:\n\t\t if params is not None:\n\t\t\tparams = (self.param_map[name], arg) + params\n\t\t else:\n\t\t\tparams = (self.param_map[name], arg)\n\t\t try:\n\t\t\tapply(checkfunc, params)\n\t\t except ValidationError, msg:\n\t\t\tbadcheck.append(msg)\n\t except ValidationError, args:\n\t\tmissing.append(args)\n\n\tfor (name, checkfunc, params) in self._optional:\n\t tup = self.make_optional(name)\n\t if tup and checkfunc is not None:\n\t\tif params is not None:\n\t\t params = (self.param_map[name], tup) + params\n\t\telse:\n\t\t params = (self.param_map[name], tup)\n\t\ttry:\n\t\t apply(checkfunc, params)\n\t\texcept ValidationError, msg:\n\t\t badcheck.append(msg)\n\n\tif (missing or badcheck) and self.log_errors:\n\t self.log_error(missing, badcheck)\n\n\tif (missing or badcheck) and self.generate_error_page:\n\t self.generate_HTML(missing, badcheck)\n\n\tself.missing = missing\n\tself.badcheck = badcheck\n\n\treturn not (missing or badcheck)", "def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())", "def is_valid(self):\n return self._valid", "def checkObservation(self):\n if (self.independentVariable is not None \n and self.observation is not None \n and self.observationError is not None):\n l = len(self.independentVariable)\n if (l == len(self.observation) and l == len(self.observationError)):\n return True\n return False", "def validation_required(self):\n return self._validation_required", "def is_error(self):\n\n return self._error_message is not None", "def validate(self):\n return 1", "def is_error(self) -> bool:\n return self.is_client_error() or self.is_server_error()", "def valid(self):\n return self._valid" ]
[ "0.79174244", "0.7894807", "0.7860656", "0.7844026", "0.78179806", "0.78001493", "0.77460116", "0.76075137", "0.7494825", "0.7405556", "0.7374498", "0.73029155", "0.7184244", "0.714132", "0.7135738", "0.7081772", "0.7046217", "0.7024928", "0.69485885", "0.6942181", "0.6913216", "0.6864033", "0.6829887", "0.6774124", "0.67516434", "0.67036074", "0.66549385", "0.6652012", "0.66401833", "0.6589429", "0.6560662", "0.65562826", "0.65523535", "0.6499688", "0.6489775", "0.64792794", "0.64792794", "0.64761364", "0.64761364", "0.6471817", "0.64635116", "0.6461455", "0.64490795", "0.6445785", "0.643509", "0.642345", "0.6413803", "0.64007205", "0.6384709", "0.6382203", "0.63801366", "0.63752365", "0.6366825", "0.63343716", "0.6321363", "0.6314908", "0.62836957", "0.627573", "0.625297", "0.6247023", "0.62467945", "0.6240236", "0.6217289", "0.6217289", "0.6210116", "0.6207963", "0.6185595", "0.61805373", "0.6174903", "0.61704296", "0.61647576", "0.61635184", "0.61503035", "0.6135695", "0.61292386", "0.6110903", "0.61095834", "0.61063516", "0.6103686", "0.6098882", "0.60962784", "0.6088307", "0.6086112", "0.6084679", "0.60839576", "0.6075923", "0.607394", "0.6068718", "0.6067592", "0.6063562", "0.6056795", "0.60561585", "0.6055331", "0.60533845", "0.6050518", "0.6039032", "0.6035114", "0.602794", "0.60259783", "0.6024367" ]
0.78778917
2
return validation errors as a list of dictionaries
def get_validation_errors(self): return [err.to_dict() for err in self._schema.validator.validation_errors]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_wrapper(x):\n errors = list()\n for error_key, error_list in list(x.items()):\n for error in error_list:\n if error_key == 'non_field_errors':\n errors.append(error)\n else:\n errors.append(\"%s: %s\" % (error_key, error))\n return errors", "def filter_validation_errors(errors):\n error_messages = []\n for field, msgs in errors.items():\n if isinstance(msgs, dict):\n for f, m in msgs.items():\n error_messages.append(dict(\n field=f,\n message=m,\n code=error_codes['validation_error'],\n ))\n else:\n error_messages.append(dict(\n field=field,\n message=msgs,\n code=error_codes['validation_error'],\n ))\n return error_messages", "def validation_errors(self):\n return self._validation_errors", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def errors(self) -> List[Error]:", "def validation_errors_to_error_messages(validation_errors):\n error_messages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n error_messages.append(f\"{field}: {error}\")\n return error_messages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f'{field} : {error}')\n return errorMessages", "def json(self):\n d = [err.json for err in self.errors]\n return d", "def getErrorsList(self):\n return self.__errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def _pydantic_errors_to_validation_results(\n errors: list[dict | Exception] | ValidationError,\n file_path: Path,\n scope: Scope,\n) -> list[ValidationResult]:\n out = []\n for e in (\n errors.errors() if isinstance(errors, ValidationError) else cast(list, errors)\n ):\n if isinstance(e, Exception):\n message = getattr(e, \"message\", str(e))\n id = \"exception\"\n scope = Scope.FILE\n else:\n id = \".\".join(\n filter(\n bool,\n (\n \"dandischema\",\n e.get(\"type\", \"UNKNOWN\"),\n \"+\".join(e.get(\"loc\", [])),\n ),\n )\n )\n message = e.get(\"message\", e.get(\"msg\", None))\n out.append(\n ValidationResult(\n origin=ValidationOrigin(\n name=\"dandischema\",\n version=dandischema.__version__,\n ),\n severity=Severity.ERROR,\n id=id,\n scope=scope,\n path=file_path,\n message=message,\n # TODO? dataset_path=dataset_path,\n # TODO? 
dandiset_path=dandiset_path,\n )\n )\n return out", "def getErrors(self) -> java.util.Collection:\n ...", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def get_validation_errors(\n self,\n schema_version: Optional[str] = None,\n devel_debug: bool = False,\n ) -> list[ValidationResult]:\n ...", "def GetAll(self):\n return self._errors.copy()", "def validations(self):\n return self.container['validations']", "def getErrors(self):\n return self.errors", "def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors", "def security_errors(self):\n errors = ErrorDict()\n for f in [\"honeypot\", \"timestamp\", \"security_hash\"]:\n if f in self.errors:\n errors[f] = self.errors[f]\n return errors", "def errors(self):\n return self._errors", "def errors(self):\n return self.__errors", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def failure(self, validation_failure):\n \n self.request.response.status_int = 400\n return validation_failure.error.asdict()", "def _validation_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_validate\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_validate[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(predictions, targets)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. 
- accuracy\n return errors", "def to_dict(self):\n res = self.data\n if self._errors:\n res[\"errors\"] = self._errors\n return res", "def filter_draft_errors(result):\n error_messages = []\n for field, msgs in result.get('messages', {}).items():\n if msgs.get('state', None) == 'error':\n for m in msgs['messages']:\n error_messages.append(dict(\n field=field,\n message=m,\n code=error_codes['validation_error'],\n ))\n return error_messages", "def validation(self, tokens):\n return self.process_value_pairs(tokens, \"validation\")", "def errors(self) -> List[Error]:\n return self._errors_files + list(self._errors.values())", "def get_errored_courses(self):\r\n return dict((k, self.errored_courses[k].errors) for k in self.errored_courses)", "def derive_error_dicts(self, error_obj_list):\n results = []\n for error_obj in error_obj_list:\n if error_obj:\n results.append(self.derive_error_dict(error_obj))\n return results", "def AsJson(self):\n\n return json.dumps(self._errors)", "def getValidations(self):\n return self.objectValues('InstrumentValidation')", "def field_errors(bound_field):\n seen = []\n errors = {}\n if hasattr(bound_field.field, \"fields\"):\n for idx, subfield in enumerate(bound_field.field.fields):\n key = \"%s_%d\" % (bound_field.auto_id, idx)\n subfield_errors = getattr(subfield.widget, \"errors\", [])\n errors[key] = subfield_errors\n seen.extend(subfield_errors)\n for error in bound_field.errors:\n if error not in seen:\n errors.setdefault(bound_field.auto_id, [])\n errors[bound_field.auto_id].append(error)\n return errors.items()", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def Errors(self):\n return self._get_attribute('errors')", "def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def retrieve_error_messages(self):\n return self.errors_seen[:]", "def errors(self):\n return self._properties.get(\"errors\")", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def all_errors(self) -> List[XMLSchemaParseError]:\n errors = []\n for comp in self.iter_components():\n if comp.errors:\n errors.extend(comp.errors)\n return errors", "def render_errors(form):\n return {\n \"form\": form\n }", "def get_errors(self, response: response_domain_model.Response, question_code: str) -> Sequence['ValidationError']:\n ...", "def to_dict_impl(cls, self: 'ErrorsAndWarnings') -> Dict[str, Any]:\n # See comment above.\n return {'errors': [e.to_dict() for e in self._errors.values() # pylint: disable=protected-access\n if e.is_persistant]}", "def get_field_errors(self, field):\r\n identifier = format_html('{0}.{1}', self.form_name, field.name)\r\n return self.error_class([SafeTuple((identifier, '$pristine', '$pristine', 'invalid', e))\r\n for e in self.errors.get(field.name, [])])", "def get_validation_errors(response, field, index=0):\n assert response.status_code == 400\n i = 0\n for error in response.data[\"invalid_params\"]:\n if error[\"name\"] != field:\n continue\n\n if i == index:\n return error\n\n i += 1", "def errors(self) -> List[Error]:\n # May have inherited errors with a different path.\n for error in self._errors.values():\n error.path = self.path\n if self.is_removed: # Mark all of our errors as 
non-persistant.\n error.is_persistant = False\n return list(self._errors.values())", "def validations(self) -> Sequence['outputs.Validation']:\n return pulumi.get(self, \"validations\")", "def _validate(self) -> typing.List[str]:\n return jsii.invoke(self, \"validate\", [])", "def get_validation_errors(\n self,\n schema_version: Optional[str] = None,\n devel_debug: bool = False,\n ) -> list[ValidationResult]:\n errors: list[ValidationResult] = pynwb_validate(\n self.filepath, devel_debug=devel_debug\n )\n if schema_version is not None:\n errors.extend(\n super().get_validation_errors(\n schema_version=schema_version, devel_debug=devel_debug\n )\n )\n else:\n # make sure that we have some basic metadata fields we require\n try:\n origin = ValidationOrigin(\n name=\"nwbinspector\",\n version=str(_get_nwb_inspector_version()),\n )\n\n for error in inspect_nwbfile(\n nwbfile_path=self.filepath,\n skip_validate=True,\n config=load_config(filepath_or_keyword=\"dandi\"),\n importance_threshold=Importance.BEST_PRACTICE_VIOLATION,\n # we might want to switch to a lower threshold once nwbinspector\n # upstream reporting issues are clarified:\n # https://github.com/dandi/dandi-cli/pull/1162#issuecomment-1322238896\n # importance_threshold=Importance.BEST_PRACTICE_SUGGESTION,\n ):\n severity = NWBI_IMPORTANCE_TO_DANDI_SEVERITY[error.importance.name]\n kw: Any = {}\n if error.location:\n kw[\"within_asset_paths\"] = {\n error.file_path: error.location,\n }\n errors.append(\n ValidationResult(\n origin=origin,\n severity=severity,\n id=f\"NWBI.{error.check_function_name}\",\n scope=Scope.FILE,\n path=Path(error.file_path),\n message=error.message,\n # Assuming multiple sessions per multiple subjects,\n # otherwise nesting level might differ\n dataset_path=Path(error.file_path).parent.parent, # TODO\n dandiset_path=Path(error.file_path).parent, # TODO\n **kw,\n )\n )\n except Exception as e:\n if devel_debug:\n raise\n # TODO: might reraise instead of making it into an error\n return _pydantic_errors_to_validation_results(\n [e], self.filepath, scope=Scope.FILE\n )\n\n from dandi.organize import validate_organized_path\n\n from .bids import NWBBIDSAsset\n\n if not isinstance(self, NWBBIDSAsset) and self.dandiset_path is not None:\n errors.extend(\n validate_organized_path(self.path, self.filepath, self.dandiset_path)\n )\n return errors", "def create_return_dict_validator(self):\n return {\n 'count': {'type': 'integer', 'required': True, 'empty': False},\n 'rows': {'type': 'list', 'required': True, 'schema': {'type': 'dict'}}\n }", "def get_error(self) -> List[str]:\n return []", "def get_error(self) -> List[str]:\n return []", "def _flatten_errors(self, params, parent=None):\r\n data = OrderedDict()\r\n for key, val in params.items():\r\n full_key = parent + \"[\" + key + \"]\" if parent else key\r\n if full_key.endswith(\"[errors]\"):\r\n full_key = full_key[:-len(\"[errors]\")]\r\n if isinstance(val, dict):\r\n data.update(self._flatten_errors(val, full_key))\r\n elif key == \"errors\":\r\n for error in val:\r\n data[full_key + \"[\" + error[\"attribute\"] + \"]\"] = [error[\"message\"]]\r\n else:\r\n data[full_key] = [val]\r\n return data", "def validate(cls, data, errors):", "def form_errors(form):\n errors = {}\n max_name_length = Item.name.property.columns[0].type.length\n if not form.get('name', None):\n errors['name'] = 'Please enter a name.'\n elif len(form['name']) > max_name_length:\n errors['name'] = (\n 'Name must be less than %s characters.' 
% max_name_length\n )\n if not Catagory.exists(form.get('catagory', None)):\n errors['catagory'] = 'Not a valid catagory.'\n if not form.get('description', None):\n errors['description'] = 'Please enter a description.'\n return errors", "def getParseErrors(self):\n return [x for x in self.xeps if x.parseErrors]", "def get_field_errors(self, bound_field):\r\n errors = super(NgFormValidationMixin, self).get_field_errors(bound_field)\r\n identifier = format_html('{0}.{1}', self.form_name, self.add_prefix(bound_field.name))\r\n errors_function = '{0}_angular_errors'.format(bound_field.field.__class__.__name__)\r\n try:\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, errors_function)\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n except (TypeError, AttributeError):\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, 'Default_angular_errors')\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n errors.append(SafeTuple((identifier, '$dirty', '$valid', 'valid', ''))) # for valid fields\r\n errors.extend([SafeTuple((identifier, '$dirty', pe[0], 'invalid', force_text(pe[1])))\r\n for pe in potential_errors])\r\n return errors", "def errors(self):\n raise NotImplementedError", "def validate(self):\n errors = {}\n for typ, items in self._items.iteritems():\n for name, spec in items.iteritems():\n assert hasattr(spec, 'validate'), 'Does %s:%s descend from FrodoBase?' % (name, spec)\n spec_errors = spec.validate()\n if spec_errors:\n errors[name] = spec_errors\n return errors\n\n # sys.modules[__name__] = Configuration()", "def errors(self):\n return self.args[1]", "def validate(self, value):\n errors = []\n\n try:\n v = validate_email(value) # validate and get info\n email = v[\"email\"] # replace with normalized form\n except EmailNotValidError as e:\n # email is not valid, exception message is human-readable\n errors.append(str(e))\n\n return errors", "def error(self) -> list:\n return self.__err", "def validate(self, arg):\n new_values = []\n for i in self.cast(arg):\n# new_values.append(self.checkValues(i))\n new_values.append(self.template.validate(i))\n return new_values", "def _get_form_error(self):\n errors = {}\n if self._form_error:\n errors[\"base\"] = self._form_error\n self._form_error = None\n return errors", "def _validate(this, validators):\n for val_key, validator in validators.items():\n if isinstance(val_key, tuple):\n field_value, field_name = (getitem(this, val_key[0], None), val_key[0])\n if field_value != val_key[1]:\n continue\n else:\n _validate(this, validator)\n continue\n else:\n field_value, field_name = (getitem(this, val_key, None), val_key)\n if hasattr(field_value, 'validate') and isinstance(validator, dict):\n field_value.validate(validator)\n setitem(this, 'validation_errors', dict(this.validation_errors, **field_value.validation_errors))\n elif isinstance(validator, dict):\n _validate(getitem(this, val_key, None), validator)\n else:\n val_errors = [validation_runner(funkidator[0], field_name, field_value, funkidator[1:]) for funkidator in validator]\n setitem(this, 'validation_errors', dict(getitem(this, 'validation_errors', {}), **dict((key, val) for item in val_errors for key, val in item.items())))", "def getErrors(self):\n errorList = []\n\n # E0\n try:\n if not self.e0.isValid():\n errorList.append(\"Invalid first error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No first error axis in ErrorEllipse Class.\")\n\n # E1\n try:\n if not 
self.e1.isValid():\n errorList.append(\"Invalid second error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No second error axis in ErrorEllipse Class.\")\n\n # E2\n try:\n if not self.e2.isValid():\n errorList.append(\"Invalid third error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No third error axis in ErrorEllipse Class.\")\n\n # maximumHorizontalProjection\n try:\n self.maximumHorizontalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumHorizontalProjection in ErrorEllipse Class.\")\n\n # maximumVerticalProjection\n try:\n self.maximumVerticalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumVerticalProjection in ErrorEllipse Class\")\n\n # equivalentHorizontalRadius\n try:\n self.equivalentHorizontalRadius\n except (NameError, AttributeError):\n errorList.append(\"No EquivalentHorizontalRadius in ErrorEllipse class\")\n\n return errorList", "def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None", "def error_messages(self) -> List[str]:\n spatial_msgs = []\n temporal_msgs = []\n if self.spatial:\n spatial_msgs = [m for v, m in self.spatial_validations if not v(self.spatial)]\n if self.temporal:\n temporal_msgs = [m for v, m in self.temporal_validations if not v(self.temporal)]\n\n return spatial_msgs + temporal_msgs", "def validate(self) -> list:\n # If it has been validated before\n if self._validated:\n # return current list of issues\n return self._validation_issues\n\n # Has not been validated before\n validation_issues = []\n # Validate pipeline schema version\n if \"version\" not in self._pipeline_definition:\n validation_issues.append(\"Pipeline schema version field is missing.\")\n elif not isinstance(self._pipeline_definition[\"version\"], str):\n validation_issues.append(\"Pipeline schema version field should be a string.\")\n\n # Validate pipelines\n if \"pipelines\" not in self._pipeline_definition:\n validation_issues.append(\"Pipeline is missing 'pipelines' field.\")\n elif not isinstance(self._pipeline_definition[\"pipelines\"], list):\n validation_issues.append(\"Field 'pipelines' should be a list.\")\n elif len(self._pipeline_definition[\"pipelines\"]) == 0:\n validation_issues.append(\"Pipeline has zero length 'pipelines' field.\")\n\n # Validate primary pipeline\n if \"primary_pipeline\" not in self._pipeline_definition:\n validation_issues.append(\"Could not determine the primary pipeline.\")\n elif not isinstance(self._pipeline_definition[\"primary_pipeline\"], str):\n validation_issues.append(\"Field 'primary_pipeline' should be a string.\")\n\n primary_pipeline = 
self.get_pipeline_definition(self._pipeline_definition.get(\"primary_pipeline\"))\n if not primary_pipeline:\n validation_issues.append(\"No primary pipeline was found\")\n else:\n primary_pipeline = primary_pipeline.to_dict()\n # Validate primary pipeline structure\n if \"app_data\" not in primary_pipeline:\n validation_issues.append(\"Primary pipeline is missing the 'app_data' field.\")\n else:\n if \"version\" not in primary_pipeline[\"app_data\"]:\n validation_issues.append(\"Primary pipeline is missing the 'version' field.\")\n if \"properties\" not in primary_pipeline[\"app_data\"]:\n validation_issues.append(\"Node is missing 'properties' field.\")\n elif len(primary_pipeline[\"app_data\"][\"properties\"]) == 0:\n validation_issues.append(\"Pipeline has zero length 'properties' field.\")\n\n if \"nodes\" not in primary_pipeline or len(primary_pipeline[\"nodes\"]) == 0:\n validation_issues.append(\"At least one node must exist in the primary pipeline.\")\n else:\n for node in primary_pipeline[\"nodes\"]:\n if \"component_parameters\" not in node[\"app_data\"]:\n validation_issues.append(\"Node is missing 'component_parameters' field\")\n\n return validation_issues", "def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in self.terms:\r\n errs.append(err)\r\n return errs", "def get_wtf_errors(self, wtf_errors):\n\t\tmessages = []\n\t\tmessages.append('<ol class=\"wtf-errors\">')\n\t\tfor field, errors in wtf_errors.iteritems():\n\t\t\tmessages.append(\"<li>\"+field+\": <br />\")\n\t\t\tfor error in errors:\n\t\t\t\tmessages.append(\"&mdash; \"+error+ \"<br />\")\n\t\t\tmessages.append(\"</li>\")\n\t\tmessages.append(\"</ol>\")\n\t\treturn \"\".join(messages)", "def validation(self):\n validation_info = {}\n for _doc in self.schema_extension_only['@graph']:\n if \"$validation\" in _doc:\n data = _doc[\"$validation\"]\n if \"definitions\" in _doc[\"$validation\"]:\n data = expand_ref(data, _doc[\"$validation\"][\"definitions\"])\n validation_info[_doc[\"@id\"]] = data\n return validation_info", "def extra_validation(\n self, process_graph: dict, env: EvalEnv, result, source_constraints: List[SourceConstraint]\n ) -> Iterable[dict]:\n return []", "def _get_retriable_errors(out: List[str]) -> List[str]:\n return [\n line for line in out\n if any(error in line for error in RETRIABLE_ERRORS)\n ]", "def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None", "def get_validation_data(self):\n return self.unzip_batch(self.valid)", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def validate_to_python(self, value):\n super(ListOfDictField, self).validate(value)\n if value == None:\n return []\n if not isinstance(value, (list, tuple)):\n raise ValidationError('Must be a list or tuple, got {0}'.format(type(value).__name__))\n cleaned = []\n for index, dct in enumerate(value):\n if not isinstance(dct, dict):\n raise ValidationError('Item {0}: Must be a list of dicts, got {1}'.format(index, type(value)))\n form = self.Form(dct)\n if form.is_valid():\n cleaned.append(form.cleaned_data)\n else:\n errors = form.errors.as_text()\n raise ValidationError('Item {0}: Invalid format:\\n{1}'.format(index, errors))\n return cleaned", "def getBuildErrors(self):\n return [x for x in self.xeps if x.buildErrors]", "def _run_extra_validators(self, data):\n errors = defaultdict(list)\n for validator in 
self.get_extra_validators():\n validator.set_instance(self.instance)\n try:\n validator(data)\n except ValidationError as exc:\n for field, field_errors in exc.detail.items():\n errors[field] += field_errors\n return errors", "def check_set_errors(self):\n response = self.read()\n return [] if response == \"\" else [response]", "def get_validate(self) -> dict:\n response = self.rc.execute(\"GET\",\n self._get_uri(GET_VALIDATE_URI),\n headers=self.header,\n verify=self.verify)\n return response.json()", "def errors():\n return THE_LOGGER.errors", "def error_map(self):\n return self._error_map", "def _parse_file_errors(self, file):\n\n with open(file, 'r') as lines:\n errors = []\n\n for line in lines:\n if self.log_regexp.TRACEBACK.match(line):\n errors.append({})\n\n if self.log_regexp.ERROR_PLACE.match(line):\n code_line = re.search(self.log_regexp.ERROR_LINE, line)[0]\n error_file = re.search(self.log_regexp.ERROR_FILE, line)[0]\n errors[-1][self.output_params.LINE] = int(code_line.split()[1])\n errors[-1][self.output_params.ERROR_FILE] = error_file.strip('\"')\n\n if self.log_regexp.ERROR_TYPE.match(line):\n type_error = re.search(self.log_regexp.ERROR_TYPE, line)[0]\n message = line[len(type_error)+1:]\n errors[-1].update({self.output_params.TYPE: type_error})\n errors[-1].update({self.output_params.MESSAGE: message.strip()})\n\n return errors", "def getAll(self):\n x,y,a = self.getxya()\t\n xerrs = [self.errors[0][i] for i in range(len(self.x)) if self.x[i]!=None and self.y[i]!=None]\n yerrs = [self.errors[1][i] for i in range(len(self.x)) if self.x[i]!=None and self.y[i]!=None]\t\n return x,y,a,xerrs,yerrs", "def extract_form_errors(html):\n errors = re.findall('<ul class=\"errorlist\"(.*)</ul>',\n html,\n re.IGNORECASE)\n \n return errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def error_data(self):\n\n if not self.__settings:\n return []\n\n return self.__transaction_errors", "def _training_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_train\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_train[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(targets, predictions)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. - accuracy\n return errors", "def handle_marshmallow_validaton(err): # except ValidationError as err\n return jsonify(err.messages), 400 # bad request" ]
[ "0.7443273", "0.7419965", "0.7331665", "0.7269884", "0.7269884", "0.7269884", "0.7269884", "0.7269884", "0.72689366", "0.72655326", "0.7202294", "0.7177334", "0.7172576", "0.7163961", "0.7127716", "0.6941691", "0.6911603", "0.6893204", "0.6834352", "0.6751597", "0.6711623", "0.6692114", "0.65932", "0.6580191", "0.65579456", "0.6547798", "0.6542113", "0.6536023", "0.6536023", "0.6513981", "0.65088105", "0.65040374", "0.6498194", "0.64897037", "0.6471132", "0.6465412", "0.6460422", "0.64188963", "0.64180636", "0.64159364", "0.6409723", "0.6372795", "0.6364486", "0.6353769", "0.6346426", "0.63403416", "0.6328817", "0.63133454", "0.6306431", "0.6300808", "0.6291286", "0.6279903", "0.6197063", "0.6189441", "0.61880106", "0.6184843", "0.6180694", "0.6174439", "0.616913", "0.61590993", "0.61590993", "0.6155709", "0.6154123", "0.61394304", "0.6135869", "0.61093223", "0.6098295", "0.6088429", "0.6086743", "0.6078478", "0.60707754", "0.60596794", "0.6059373", "0.6043961", "0.60430604", "0.6032961", "0.60321665", "0.6015012", "0.6004922", "0.5999423", "0.5997008", "0.59828395", "0.5980423", "0.595957", "0.595767", "0.5949186", "0.59448415", "0.59262645", "0.59194094", "0.5917437", "0.59058225", "0.5853256", "0.58335304", "0.5821969", "0.581414", "0.5793138", "0.57838666", "0.5769592", "0.5760641", "0.5755052" ]
0.84701467
0
Faster Wavelength selector. If passed lists it will return lists. If passed np arrays it will return arrays. Fastest is using np.ndarrays. fast_wav_selector is ~1000-2000x quicker than wav_selector.
def fast_wav_selector(wav, flux, wav_min, wav_max):
    if isinstance(wav, list):  # if passed lists
        wav_sel = [value for value in wav if (wav_min < value < wav_max)]
        flux_sel = [value[1] for value in zip(wav, flux) if (wav_min < value[0] < wav_max)]
    elif isinstance(wav, np.ndarray):
        # Super Fast masking with numpy
        mask = (wav > wav_min) & (wav < wav_max)
        wav_sel = wav[mask]
        flux_sel = flux[mask]
    else:
        raise TypeError("Unsupported input wav type")
    return [wav_sel, flux_sel]
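# Editor's sketch (not part of the original record): minimal usage of the
# selector above. The 2.1/2.3 bounds and micron units are hypothetical
# illustrations; assumes `import numpy as np` in scope.
#
#     wav = np.linspace(2.0, 2.5, 1000)   # wavelength grid (e.g. microns)
#     flux = np.ones(1000)                # matching flux array
#     wav_sel, flux_sel = fast_wav_selector(wav, flux, 2.1, 2.3)
#     # wav_sel/flux_sel are np.ndarrays here; passing lists returns lists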
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wav_selector(wav, flux, wav_min, wav_max, verbose=False):\n if isinstance(wav, list): # if passed lists\n wav_sel = [wav_val for wav_val in wav if (wav_min < wav_val < wav_max)]\n flux_sel = [flux_val for wav_val, flux_val in zip(wav,flux) if (wav_min < wav_val < wav_max)]\n elif isinstance(wav, np.ndarray):\n # Super Fast masking with numpy\n mask = (wav > wav_min) & (wav < wav_max)\n if verbose:\n print(\"mask=\", mask)\n print(\"len(mask)\", len(mask))\n print(\"wav\", wav)\n print(\"flux\", flux)\n wav_sel = wav[mask]\n flux_sel = flux[mask]\n else:\n raise TypeError(\"Unsupported input wav type\")\n return [wav_sel, flux_sel]", "def GetSpectraFromIndexList(all_wl,all_spectra,idx_list):\n NBSPEC=len(all_spectra)\n \n \n all_wl_sel=[]\n all_spectra_sel=[]\n \n for idx in np.arange(0,NBSPEC):\n if idx in idx_list:\n all_wl_sel.append(all_wl[idx])\n all_spectra_sel.append(all_spectra[idx])\n return all_wl_sel,all_spectra_sel", "def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr", "def wav_reader(directory):\n wav_list = find_wavs(directory)\n res_list = []\n\n for wav in wav_list:\n temp_list = [wav]\n\n if re.match(r'.*target1.*\\.wav$', wav):\n temp_list.append(True)\n else:\n temp_list.append(False)\n\n res_list.append(tuple(temp_list))\n\n return res_list", "def torch_calc_spectrograms(waves, window_lengths, spectral_diffs=(0, 1),\r\n window_name='hann', use_mel_scale=True,\r\n proj_method='matmul', num_spec_bins=256,\r\n random_crop=True):\r\n # waves = [tf.squeeze(w, axis=-1) for w in waves]\r\n waves = [torch.squeeze(w, dim=-1) for w in waves]\r\n\r\n if window_name == 'hann':\r\n # windows = [tf.reshape(tf.signal.hann_window(wl, periodic=False), [1, 1, -1])\r\n # for wl in window_lengths]\r\n windows = [torch.reshape(torch.from_numpy(W.hann(wl)), [1, 1, -1])\r\n for wl in window_lengths]\r\n elif window_name is None:\r\n windows = [None] * len(window_lengths)\r\n else:\r\n raise ValueError('Unknown window function (%s).' 
% window_name)\r\n\r\n spec_len_wave = []\r\n for d in spectral_diffs:\r\n for length, window in zip(window_lengths, windows):\r\n\r\n wave_crops = waves\r\n for _ in range(d):\r\n wave_crops = [w[:, 1:] - w[:, :-1] for w in wave_crops]\r\n\r\n if random_crop:\r\n # wave_crops = aligned_random_crop(wave_crops, length)\r\n wave_crops = torch_aligned_random_crop(wave_crops, length)\r\n\r\n # frames = [tf.signal.frame(wc, length, length // 2) for wc in wave_crops]\r\n frames = [torch.tensor(librosa.util.frame(wc.numpy(),length,length//2)) for wc in wave_crops]\r\n # TODO: Whether this method is feasible (in the gradient part) remains to be verified\r\n if window is not None:\r\n frames = [f * window for f in frames]\r\n\r\n if proj_method == 'fft':\r\n # ffts = [tf.signal.rfft(f)[:, :, 1:] for f in frames]\r\n ffts = [torch.rfft(f,signal_ndim=1)[:, :, 1:] for f in frames]\r\n elif proj_method == 'matmul':\r\n # mat = get_spectral_matrix(length, num_spec_bins=num_spec_bins,\r\n # use_mel_scale=use_mel_scale)\r\n # ffts = [matmul_real_with_complex(f, mat) for f in frames]\r\n mat = torch_get_spectral_matrix(length, num_spec_bins=num_spec_bins,\r\n use_mel_scale=use_mel_scale)\r\n ffts = [torch_matmul_real_with_complex(f, mat) for f in frames]\r\n\r\n #sq_mag = lambda x: tf.square(tf.math.real(x)) + tf.square(tf.math.imag(x))\r\n sq_mag = lambda x: (torch.view_as_real(x)[:,0])**2 + (torch.view_as_real(x)[:,1])**2\r\n # torch.view_as_real() opreation need the last release edition of Pytorch 1.6.0\r\n specs_sq = [sq_mag(f) for f in ffts]\r\n\r\n if use_mel_scale and proj_method == 'fft':\r\n sample_rate = 24000\r\n upper_edge_hertz = sample_rate / 2.\r\n lower_edge_hertz = sample_rate / length\r\n # lin_to_mel = tf.signal.linear_to_mel_weight_matrix(\r\n # num_mel_bins=num_spec_bins,\r\n # num_spectrogram_bins=length // 2 + 1,\r\n # sample_rate=sample_rate,\r\n # lower_edge_hertz=lower_edge_hertz,\r\n # upper_edge_hertz=upper_edge_hertz,\r\n # dtype=tf.dtypes.float32)[1:]\r\n # specs_sq = [tf.matmul(s, lin_to_mel) for s in specs_sq]\r\n lin_to_mel = torch_build_mel_basis(\r\n num_mel_bins=num_spec_bins,\r\n num_spectrogram_bins=length,\r\n sample_rate=sample_rate,\r\n lower_edge_hertz=lower_edge_hertz,\r\n upper_edge_hertz=upper_edge_hertz,\r\n dtype=torch.float32)\r\n # TODO: I use librosa to build the mel filters here to instead, and i'm not sure whether this method works or not\r\n specs_sq = [torch.matmul(s, lin_to_mel) for s in specs_sq]\r\n\r\n # specs = [tf.sqrt(s+EPSILON) for s in specs_sq]\r\n specs = [torch.sqrt(s+EPSILON) for s in specs_sq]\r\n\r\n spec_len_wave.append(specs)\r\n\r\n spec_wave_len = zip(*spec_len_wave)\r\n return spec_wave_len", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. 
e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, 
window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def make_wavetables(kernel: GPy.kern.Kern, n: int = 17, waveshaping: bool = False) -> List[np.ndarray]:\n wavetables = []\n\n if not waveshaping:\n cholesky = make_cov_cholesky(kernel)\n else:\n cholesky = make_cov_cholesky_waveshaping(kernel)\n for _ in range(n):\n wavetable = fast_normal_from_cholesky(cholesky)[0]\n wavetables.append(wavetable[:-1])\n\n return wavetables", "def select(arrays, index):\n if arrays is None or any(i is None for i in arrays):\n return arrays\n return tuple(i.ravel()[index] for i in arrays)", "def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0", "def dtw_list_store(source, target, source_list, target_list):\n\n dtw_source = []\n dtw_target = []\n\n fs, source = scipy.io.wavfile.read(source)\n fs, target = scipy.io.wavfile.read(target)\n\n\n #source = psf.mfcc(source, 16000)\n #target = psf.mfcc(target, 16000)\n\n source, energy = psf.fbank(source, 16000)\n target, energy = psf.fbank(target, 16000)\n\n distance, path = fastdtw(source, target, dist=euclidean)\n\n for vertex in path:\n dtw_source.append(source[vertex[0],:])\n dtw_target.append(target[vertex[1],:])\n\n dtw_source = np.array(dtw_source)\n dtw_target = np.array(dtw_target)\n\n\n source_list.append(dtw_source)\n target_list.append(dtw_target)", "def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = 
np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()", "def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data", "def _pick_elements(self,regexp_ind,array_list):\r\n new_array_list = [] #New list with elements matching regexp_ind\r\n array_indices = [] #Indices that matches the arrays in new_array_list and array_list\r\n\r\n array_index = 0\r\n for array in array_list:\r\n _new = []\r\n for ai in array:\r\n if ai in regexp_ind:\r\n _new.append(ai)\r\n if len(_new):\r\n new_array_list.append(np.array(_new))\r\n array_indices.append(array_index)\r\n array_index += 1\r\n return new_array_list, array_indices", "def load_all_audios(\n df: pd.DataFrame,\n *,\n target_sample_rate: int = None,\n mono: bool = constants.STEREO_TO_MONO_DEFAULT.value\n) -> Tuple[List[int], List[np.ndarray]]:\n file_list = list(df[\"audio_file_path\"])\n\n # audios is a list of (rate: int, data: np.ndarray)\n audios = load_multiple_wav(file_list)\n rate = [i[0] for i in audios]\n data = [i[1] for i in audios]\n\n # Convert to mono if needed\n if mono:\n data = p_map(stereo_to_mono, data, desc=\"Converting to mono...\")\n\n # Resample if needed\n if target_sample_rate is not None:\n data = p_map(resample, data, rate, [\n target_sample_rate for _ in data], desc=\"Resampling...\")\n rate = [target_sample_rate for _ in data]\n\n return rate, data", "def arrays(self, select_output):\n pass", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def my_get_paths_to_wavs(self, path):\n if self.label_type == 'original':\n # Just get all files\n return get_paths_to_wavs(path)\n elif self.label_type == 'four':\n # Filter out emotions Excitement and Frustration, leaving only\n # anger, happiness, neutral, sadness.\n new_paths_to_wavs = []\n paths_to_wavs, path_to_noise = get_paths_to_wavs(path)\n for file in paths_to_wavs:\n file_name = os.path.split(file)[1]\n emotion_label = self.get_emotion_label(file_name)\n if emotion_label in ('ang', 'hap', 'neu', 'sad'):\n new_paths_to_wavs.append(file)\n return new_paths_to_wavs, path_to_noise\n else:\n raise ValueError('Unknown label type! 
Should be either \"original\" for all samples, or \"four\" for anger, '\n 'happiness, neutral, sadness')", "def pick_samples_1D(arr, indices, dtype = np.float32):\n\n n_samples = len(indices)\n\n arr_samples = np.zeros((n_samples), dtype = dtype)\n\n for i, index in enumerate(indices):\n arr_samples[i] = arr[index]\n\n return arr_samples", "def wand_features(data, signals=EMG_SIGNALS, frame_len=EMG_FRAME_LEN,\n frame_shift=EMG_SHIFT_LEN, k=10):\n\n # samples is n_signals x n_timesteps\n samples = np.array(data[signals].T)\n phones = compute_subphones(data[\"phone\"])\n\n n_signals, n_timesteps = samples.shape[0], samples.shape[1]\n\n # Create the 17-point weighted moving average filter shown in Figure 4.2.\n ramp_filter = np.linspace(0,0.1,num=9)\n ma_filter = np.concatenate((ramp_filter[:-1], ramp_filter[::-1]))\n assert len(ma_filter) == 17\n \n n_frames = int(n_timesteps / frame_shift)\n n_feats = 5\n features = np.zeros((n_signals, n_feats, n_frames))\n frame_phones = []\n\n for i in range(n_signals):\n # Mean normalize\n x = samples[i] - np.mean(samples[i])\n\n # Apply moving average filter to compute low frequency signal w\n w = np.convolve(x, ma_filter, mode=\"same\")\n\n # Compute high frequency signal p\n p = x - w\n\n # Compute rectified signal r\n r = abs(p)\n\n # Ignore any frames that are incomplete (i.e. if n_timesteps is 2500 but \n # n_frames is 416 and frame_shift is 6, count up to 416*6 = 2496 rather\n # than 2500 timesteps, so we don't end up with a unit in the features that\n # is made up of an incomplete set of samples)\n for frame_id, t in enumerate(range(0, n_frames*frame_shift, frame_shift)):\n w_frame = w[t:t+frame_len]\n p_frame = p[t:t+frame_len]\n r_frame = r[t:t+frame_len]\n M_w = np.mean(w_frame) # Frame-based mean of w\n P_w = np.mean(w_frame * w_frame) # Frame-based power of w\n P_r = np.mean(r_frame * r_frame) # Frame-based power of r\n M_r = np.mean(r_frame) # Frame-based mean of r\n\n # Zero-crossing rate of p\n z_p = len(np.where(np.diff(np.signbit(p_frame)))[0]) / len(p_frame)\n\n features[i, :, frame_id] = np.array([M_w, P_w, P_r, z_p, M_r])\n mode_phone = mode(phones[t:t+frame_len])\n frame_phones.append(mode_phone)\n\n features = np.reshape(features, [-1, n_frames])\n\n features, labels = stack_context(features, k=k, labels=frame_phones)\n\n return features, labels", "def get_trimmed_features(words, num_recordings, base_path=\"\", energy_threshold=0.001):\n\n features_by_word = []\n for i in range(len(words)):\n indexes = []\n feature_array = []\n for j in range(1, num_recordings[i] + 1):\n # Determine the path\n path = base_path + words[i] + str(j) + \".wav\"\n (rate, data) = get_sig(path)\n # features is all the audio features for a given file\n features = get_st_features(data, rate)[0]\n # features[1] is total frame energies\n # energy threshold of 0.001 is arbitrary\n indexes.append(relevant_indexes(features[1], energy_threshold))\n # Add features for this specific audio file to the feature array for this word\n feature_array.append(features)\n # Finds the minimum index of all start indexes\n min_index = sorted(indexes, key=lambda x: x[0])[0][0]\n # Finds the max index of all end indexes\n max_index = sorted(indexes, key=lambda x: x[1])[::-1][0][1]\n # Debug print statements commented out\n # print(\"min, max index for word\", words[i])\n # print(min_index, max_index)\n # Only take the frames between min index and max index for each sample word\n # Note: Potential for a bug; if maxIndex is outside the length of its frame array\n # To fix, need to 
pad the shorter recordings with extra data\n features_by_word.append([x[0:34, min_index:max_index].transpose() for x in feature_array])\n # print(numpy.shape(features_by_word[i]))\n # features_by_word is an array of len(words) cells\n # Each cell has num_recordings[i] elements corresponding to the number of recordings of each word words[i]\n # Each recording has the same number of frames for a given word, as determined by minIndex and maxIndex\n # for a given word.\n # Finally, each frame contains the 34 features from that frame's raw data samples\n return features_by_word", "def get_1d_features(waveforms):\n durations = []\n PTratio= []\n repolarizationslope= []\n recoveryslope = []\n for i in range(len(waveforms)): \n waveform=waveforms[i,:] \n durations.append(get_waveform_duration(waveform))\n PTratio.append(get_waveform_PTratio(waveform))\n repolarizationslope.append(get_waveform_repolarizationslope(waveform))\n recoveryslope.append(get_waveform_recoveryslope(waveform))\n return np.array(durations), np.array(PTratio), np.array(repolarizationslope), np.array(recoveryslope)", "def karplus_strong(wavetable,nSamples):\n samples = []\n current_sample = 0\n previous_value = 0\n while len(samples) < nSamples:\n wavetable[current_sample] = 0.5 * (wavetable[current_sample] + previous_value)\n samples.append(wavetable[current_sample])\n previous_value = samples[-1]\n current_sample += 1\n current_sample = current_sample % wavetable.size\n return np.array(samples)", "def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size=1024):\n sig, samplerate = librosa.load(wavfile, sr=None, mono=True)\n data = list()\n\n # normalize sound wave\n # sig = sig / np.sqrt(np.mean(sig**2, axis=0));\n # sig = sig / np.max(np.max(np.abs(sig), axis=0));\n sig = sig / np.max(np.abs(sig))\n\n # calc a length array\n tmpts = np.array(timestamps)\n timestamp_interval = tmpts[1:] - tmpts[:-1]\n timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1])\n\n for sz in snapint:\n data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)),\n sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)])\n data.append(data_r)\n\n raw_data = np.array(data)\n norm_data = np.tile(np.expand_dims(\n np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n std_data = np.tile(np.expand_dims(\n np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n return (raw_data - norm_data) / std_data", "def big_sweep(all_kernels: List[GPy.kern.Kern], path: str, ls_subdivisions: int = 16, n_wavetables: int = 7) -> None:\n out_long = WavFile(os.path.join(path, 'c.wav'))\n\n delta_t = 1.\n ls_start = 0.01\n ls_end = np.pi\n\n score = []\n time = 0.\n l_vals = np.geomspace(ls_start, ls_end, ls_subdivisions).tolist()\n\n n_combinations = 1000\n for _ in range(n_combinations):\n k1_str = random.choice(all_kernels)\n while True:\n k2_str = random.choice(all_kernels)\n if k2_str != k1_str:\n break\n l1 = random.choice(l_vals)\n l2 = random.choice(l_vals)\n l1_idx = l_vals.index(l1)\n l2_idx = l_vals.index(l2)\n\n k1 = kernel_for_string(k1_str, lengthscale=l1)\n k2 = kernel_for_string(k2_str, lengthscale=l2)\n operator = random.choice(['plus', 'times'])\n if operator == 'plus':\n kernel = k1 + k2\n else:\n kernel = k1 * k2\n\n waveshaping = random.choice([True, False])\n\n synth = GPSynth(kernel, out_rt=None, out_wav=out_long, n_wavetables=n_wavetables, waveshaping=waveshaping)\n 
print(f'waveshaping={waveshaping}', k1_str, l1, operator, k2_str, l2)\n for n_idx in range(1): # only one note to c.wav otherwise the file becomes too big for the web.\n score.append({\n 'kernel_1': k1_str,\n 'operator': 'plus',\n 'kernel_2': k2_str,\n 'lengthscale_1': l1,\n 'lengthscale_1_idx': l1_idx,\n 'lengthscale_2': l2,\n 'lengthscale_2_idx': l2_idx,\n 'waveshaping': waveshaping,\n 'time': time,\n 'note': n_idx\n })\n synth.note(60, delta_t)\n time += delta_t\n\n waveshaping_str = 'waveshaping_' if waveshaping else ''\n prefix = waveshaping_str + k1_str + f'_l{l1_idx:03d}(plus)' + k2_str + f'_l{l2_idx:03d}_n'\n synth.save_wavetables(os.path.join(path, 'samples'), prefix)\n\n for waveshaping in [False, True]:\n for kernel_str in all_kernels:\n ls_start = 0.01\n ls_end = np.pi\n l_vals = np.geomspace(ls_start, ls_end, ls_subdivisions)\n for l_idx, lengthscale in enumerate(l_vals):\n k = kernel_for_string(kernel_str, lengthscale=lengthscale)\n synth = GPSynth(k, out_rt=None, out_wav=out_long, n_wavetables=n_wavetables, waveshaping=waveshaping)\n print(f'waveshaping={waveshaping}', kernel_str, lengthscale, f'waveshaping = {waveshaping}')\n for n_idx in range(1): # only one note to c.wav otherwise the file becomes too big for the web.\n score.append({\n 'kernel_1': kernel_str,\n 'operator': '',\n 'kernel_2': '',\n 'lengthscale_1': lengthscale,\n 'lengthscale_1_idx': l_idx,\n 'lengthscale_2': -1,\n 'lengthscale_2_idx': -1,\n 'waveshaping': waveshaping,\n 'time': time,\n 'note': n_idx\n })\n synth.note(60, delta_t)\n time += delta_t\n\n waveshaping_str = 'waveshaping_' if waveshaping else ''\n prefix = waveshaping_str + kernel_str + f'_l{l_idx:03d}_n'\n synth.save_wavetables(os.path.join(path, 'samples'), prefix)\n\n with open(os.path.join(path, 'score.json'), 'w') as f:\n json.dump(score, f, indent=4)", "def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)", "def torch_sample(array, indexes, desired_shape):\n torch_arr = torch.tensor(array, dtype=torch.float32)\n indexed = torch_arr[[indexes[0], indexes[1]]]\n return 
indexed.reshape(desired_shape)", "def get_table_arrays(self):\n# ftable = np.asarray(self.filter_table)\n ftable = self.filter_table\n wavelength = []\n transmission = []\n for item in ftable:\n wavelength.append(item[0])\n transmission.append(item[1])\n wavelength = np.asarray(wavelength)\n transmission = np.asarray(transmission)\n return (wavelength, transmission)", "def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels", "def preprocess_single_chords_list(self, window_size=5, flattened_window=True, hop_length=4410, to_skip=5, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, skip_coef=1) -> tuple:\n prep_data = []\n prep_targets = []\n k = 0\n # Iterate over all audio files\n for audio, chords, keys in zip(self.DATA, self.CHORDS, self.KEYS):\n print(k)\n k = k+1\n # Get log mel spectrogram\n spectrogram = IsophonicsDataset.preprocess_audio(waveform=audio.WAVEFORM, sample_rate=audio.SAMPLE_RATE, spectrogram_generator=spectrogram_generator, nfft=self.NFFT, hop_length=hop_length, norm_to_C=norm_to_C, key=keys.get_first_key())\n spectrogram = np.array(spectrogram)\n spec_length, num_samples = spectrogram.shape\n\n # Collect data for each spectrogram sample\n j = 0 # labels index\n for i in [index for index in range(num_samples) if index%to_skip==0]:\n # Get data window with zero margin\n n_pre_zeros, window_indices, n_post_zeros = IsophonicsDataset.get_flatten_indices(i, num_samples, skip_coef, window_size)\n if flattened_window:\n prep_data.append(\n np.concatenate((\n np.zeros((n_pre_zeros, spec_length)),\n np.array(spectrogram[:, window_indices]).swapaxes(0,1),\n np.zeros((n_post_zeros, spec_length))\n ), axis = 0).flatten()\n )\n else:\n prep_data.append(\n np.concatenate((\n np.zeros((n_pre_zeros, spec_length)),\n np.array(spectrogram[:, window_indices]).swapaxes(0,1),\n np.zeros((n_post_zeros, spec_length))\n ), axis = 0)\n )\n\n\n # Get label\n second = float(i)/(float(self.SAMPLE_RATE) / float(hop_length))\n while j < len(chords.START) and second > chords.START[j] :\n j = j + 1\n if j == len(chords.START):\n prep_targets.append(Dataset.get_integered_chord(\"N\", norm_to_C, keys.get_first_key()))\n else:\n prep_targets.append(Dataset.get_integered_chord(chords.CHORD[j], norm_to_C, keys.get_first_key()))\n\n print(\"[INFO] The Isophonics Dataset was successfully preprocessed.\")\n return np.array(prep_data), np.array(prep_targets)", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled 
hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def subset(mylist,mybool):\n myarray = np.array(mylist)\n return(np.squeeze(myarray.take(np.where(mybool),axis=0)))", "def get_egs(wavlist, path, min_mix=2, max_mix=3, batch_size=1):\n speaker_wavs = {}\n batch_x = []\n batch_y = []\n batch_count = 0\n\n while True: # Generate examples indefinitely\n wavsum = None\n sigs = []\n\n # Pop wav files from random speakers, store them individually for\n # dominant spectra decision and generate the mixed input\n file_name = wavlist.pop(0)\n wavlist.append(file_name)\n s1, rate = sf.read('/scratch/near/2speakers_6channel/wav16k/min/'+path+'/s1/'+file_name)\n if rate != FRAME_RATE:\n raise Exception(\"Config specifies \" + str(FRAME_RATE) +\n \"Hz as sample rate, but file \" + str(p) +\n \"is in \" + str(rate) + \"Hz.\")\n s1 = s1 - np.mean(s1)\n s1 = s1/np.max(np.abs(s1))\n s1 *= (np.random.random()*1/4 + 3/4)\n s2, rate = sf.read('/scratch/near/2speakers_6channel/wav16k/min/'+path+'/s2/'+file_name)\n if rate != FRAME_RATE:\n raise Exception(\"Config specifies \" + str(FRAME_RATE) +\n \"Hz as sample rate, but file \" + str(p) +\n \"is in \" + str(rate) + \"Hz.\")\n s2 = s2 - np.mean(s2)\n s2 = s1/np.max(np.abs(s2))\n s2 *= (np.random.random()*1/4 + 3/4)\n\n wavsum = s1[:min(len(s1),len(s2))] + s2[:min(len(s1),len(s2))]\n sigs.append(s1)\n sigs.append(s2)\n\n\n # STFT for mixed signal\n def get_logspec(sig):\n return np.log10(np.absolute(stft(sig)) + 1e-7)\n\n X = get_logspec(wavsum)\n if len(X) <= TIMESTEPS:\n continue\n\n # STFTs for individual signals\n specs = []\n for sig in sigs:\n specs.append(get_logspec(sig[:len(wavsum)]))\n specs = np.array(specs)\n\n nc = max_mix\n\n # Get dominant spectra indexes, create one-hot outputs\n Y = np.zeros(X.shape + (nc,))\n vals = np.argmax(specs, axis=0)\n for i in range(2):\n t = np.zeros(nc)\n t[i] = 1\n Y[vals == i] = t\n\n # Create mask for zeroing out gradients from silence components\n m = np.max(X) - DB_THRESHOLD/20. 
# From dB to log10 power\n z = np.zeros(nc)\n Y[X < m] = z\n\n # Generating sequences\n i = 0\n while i + TIMESTEPS < len(X):\n batch_x.append(X[i:i+TIMESTEPS])\n batch_y.append(Y[i:i+TIMESTEPS])\n i += TIMESTEPS//2\n\n batch_count = batch_count+1\n\n if batch_count == batch_size:\n inp = np.array(batch_x).reshape((batch_size,\n TIMESTEPS, -1))\n out = np.array(batch_y).reshape((batch_size,\n TIMESTEPS, -1))\n yield({'input': inp},\n {'kmeans_o': out})\n batch_x = []\n batch_y = []\n batch_count = 0", "def preprocess_single_chords_list(self, window_size=5, flattened_window=True, hop_length=4410, to_skip=5, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, skip_coef=1) -> tuple:\n prep_data = []\n prep_targets = []\n k = 0\n # Iterate over all audio files\n for audio, chords, desc in zip(self.DATA, self.CHORDS, self.DESC):\n print(k)\n k = k+1\n # Get log mel spectrogram\n spectrogram = IsophonicsDataset.preprocess_audio(waveform=audio.WAVEFORM, sample_rate=audio.SAMPLE_RATE, spectrogram_generator=spectrogram_generator, nfft=self.NFFT, hop_length=hop_length, norm_to_C=norm_to_C, key=desc.TONIC)\n spectrogram = np.array(spectrogram)\n spec_length, num_samples = spectrogram.shape\n\n # Collect data for each spectrogram sample\n j = 0 # labels index\n for i in [index for index in range(num_samples) if index%to_skip==0]:\n # Get data window with zero margin\n n_pre_zeros, window_indices, n_post_zeros = IsophonicsDataset.get_flatten_indices(i, num_samples, skip_coef, window_size)\n if flattened_window:\n prep_data.append(\n np.concatenate((\n np.zeros((n_pre_zeros, spec_length)),\n np.array(spectrogram[:, window_indices]).swapaxes(0,1),\n np.zeros((n_post_zeros, spec_length))\n ), axis = 0).flatten()\n )\n else:\n prep_data.append(\n np.concatenate((\n np.zeros((n_pre_zeros, spec_length)),\n np.array(spectrogram[:, window_indices]).swapaxes(0,1),\n np.zeros((n_post_zeros, spec_length))\n ), axis = 0)\n )\n\n\n # Get label\n second = float(i)/(float(self.SAMPLE_RATE) / float(hop_length))\n while j < len(chords.START) and second > chords.START[j] :\n j = j + 1\n if j == len(chords.START):\n prep_targets.append(Dataset.get_integered_chord(\"N\", norm_to_C, desc.TONIC))\n else:\n prep_targets.append(Dataset.get_integered_chord(chords.CHORD[j], norm_to_C, desc.TONIC))\n\n print(\"[INFO] The Billboard Dataset was successfully preprocessed.\")\n return np.array(prep_data), np.array(prep_targets)", "def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. 
I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def samples(self):\n return np.concatenate([wf.samples for wf in self._waveforms])", "def generate_sound_samples(chord_frequencies, frequency_weights):\n samples = 0\n for i in range(len(frequency_weights)):\n samples = samples + (frequency_weights[i]) * (np.sin(2 * np.pi * np.arange(\n fs * duration) * chord_frequencies[i] / fs)).astype(np.float32)\n return samples", "def extract_multi_wavelet(\n self, min_freq=0.06, max_freq=0.66, bank=8, *args, **kwargs\n ):\n out = []\n for f in np.geomspace(min_freq, max_freq, bank):\n out.append(self.extract_wavelet(f, *args, **kwargs))\n return self.__class__(\n pd.concat(out, axis=1),\n sampling_freq=self.sampling_freq,\n features=self.features,\n sessions=self.sessions,\n )", "def karplus_strong(wavetable, n_samples):\r\n samples = []\r\n current_sample = 0\r\n previous_value = 0\r\n while len(samples) < n_samples:\r\n wavetable[current_sample] = 0.5 * (wavetable[current_sample] + previous_value)\r\n samples.append(wavetable[current_sample])\r\n previous_value = samples[-1]\r\n current_sample += 1\r\n current_sample = current_sample % wavetable.size\r\n return np.array(samples)", "def get_data_rescaled(self, wave):\n m = (self.max_threshold - self.min_threshold)/(np.max(wave) - np.min(wave))\n b = self.min_threshold - m * np.min(wave)\n wave = m * wave + b\n return np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])", "def use_w(args):\n try:\n bounddata = Table.read(\n 
f'./Input/UseWv/WaveRegions_{args.WRegion}_{args.band}.csv',\n format='csv')\n except IOError:\n sys.exit(\n f'WaveRegions FILE \"./Input/UseWv/WaveRegions'\n '_{args.WRegion}_{args.band}.csv\" NOT FOUND!')\n\n wavesols = pd.read_csv(f'./Input/UseWv/WaveSolns_{args.band}.csv')\n#-------------------------------------------------------------------------------\n XRegion_dir = f'./Input/UseWv/XRegions_{args.WRegion}_{args.band}.csv'\n with open(XRegion_dir,'w') as filew:\n filew.write('order, start, end, masks\\n')\n\n m_order = np.array(bounddata['order'])\n starts = np.array(bounddata['start'])\n ends = np.array(bounddata['end'])\n ords = list( sorted(OrderDictCla().orderdict[args.band].keys()) )\n\n Ostarts = [OrderDictCla().orderdict[args.band][k][0] for k in ords]\n Oends = [OrderDictCla().orderdict[args.band][k][1] for k in ords]\n labels = []\n\n m_orders_unique = np.unique(m_order)\n\n # For each order specified, find what pixel numbers correspond to the\n # wavelength bounds presented.\n # If multiple wavelength bounds given for a single order, output a\n # pixel mask between the two, as well.\n for o in range(len(m_orders_unique)):\n\n # if len(m_orders_unique) == 9:\n # filew.write('9, 150, 1950, []\\n')\n # continue\n\n pixs = []\n mini = np.where(m_order == m_orders_unique[o])[0]\n for j in range(len(mini)):\n i = mini[j]\n\n wavebounds = [starts[i],ends[i]]\n wO = wavesols['w'+str(m_orders_unique[o])]\n pixO = wavesols['x'+str(m_orders_unique[o])]\n pix = [pixO[(np.argmin(abs(wO-wavebounds[k])))] for k in [0,1]]\n pixs = pixs + pix\n\n pixsS = list(sorted(pixs))\n q = pixsS[1:-1]\n if len(pixsS) == 2:\n filew.write('{}, {}, {},[]\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1])\n )\n else:\n filew.write('{}, {}, {},\"{}\"\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1],\n [[first,second] for first, second in zip(q[0::2], q[1::2])]\n ))", "def select_body_parts(mask, list_of_body_parts):\n\n new_mask = np.zeros(mask.shape).astype(np.bool)\n\n for body_part in list_of_body_parts:\n idxs = body_parts[body_part]\n\n for idx in idxs:\n m_ = (mask == idx)\n new_mask = np.bitwise_or(new_mask, m_)\n\n return new_mask.astype(np.uint8)", "def compound_sound(freqs, duration, samples_per_sec=None):\n if samples_per_sec is None:\n samples_per_sec = 44100\n\n time = np.arange(0,duration*samples_per_sec)\n snd = np.zeros_like(time)\n \n for f in freqs:\n snd = snd + np.sin(time*f*(2*np.pi)/samples_per_sec)\n\n # window the sound vector with a 50 ms raised cosine\n numAtten = np.round(samples_per_sec*.05);\n # don't window if requested sound is too short\n if len(snd) >= numAtten:\n snd[:numAtten/2] *= window_hanning(np.ones(numAtten))[:numAtten/2]\n snd[-(numAtten/2):] *= window_hanning(np.ones(numAtten))[-(numAtten/2):]\n\n # normalize\n snd = snd/np.max(np.abs(snd))\n\n return snd", "def compound_sound(freqs, duration, samples_per_sec=None):\n if samples_per_sec is None:\n samples_per_sec = 44100\n\n time = np.arange(0,duration*samples_per_sec)\n snd = np.zeros_like(time)\n \n for f in freqs:\n snd = snd + np.sin(time*f*(2*np.pi)/samples_per_sec)\n\n # window the sound vector with a 50 ms raised cosine\n numAtten = np.round(samples_per_sec*.05);\n # don't window if requested sound is too short\n if len(snd) >= numAtten:\n snd[:numAtten/2] *= window_hanning(np.ones(numAtten))[:numAtten/2]\n snd[-(numAtten/2):] *= window_hanning(np.ones(numAtten))[-(numAtten/2):]\n\n # normalize\n snd = snd/np.max(np.abs(snd))\n\n return snd", "def Select_Data(spec,wave_edges):\n\tif len(wave_edges) 
< 2: \n\t\traise ValueError('must be at least two bin edges!')\n\n\twave,flux,error,dfp,dfm= spec\n\tinds = np.where( (wave > wave_edges[0]) & (wave <= wave_edges[1]) )[0]\n\n\treturn wave[inds], flux[inds],error[inds], dfp[inds], dfm[inds]", "def collect_features(self, wav_path, label_path):\n n_fft = 512\n window_length = 20\n\n sound, fs = librosa.core.load(wav_path, sr=16000)\n\n if fs != 16000:\n print(wav_path)\n\n # Preemphasis\n preemp_sound = np.append(sound[0], sound[1:] - 0.97 * sound[:-1])\n\n # STFT\n spect = librosa.core.stft(preemp_sound,\n n_fft=n_fft,\n win_length=window_length * int(fs / 1000),\n hop_length=window_length * int(fs / 2000),\n window=scipy.signal.hamming,\n center=True)\n\n spect = np.log10(np.transpose(abs(spect[:, 1:]) ** 2) + 1e-16)\n\n return spect", "def get_sample_mask(self):", "def split_diphones(wav_path, outdir=None):\n tg = ml.parsing.textgrid_reader.read(textgrid_path(wav_path))\n word = os.path.splitext(os.path.basename(wav_path))[0]\n\n wav_dir = os.path.dirname(wav_path)\n diphones_dir = os.path.join(wav_dir, \"diphones\")\n\n if not os.path.exists(diphones_dir):\n os.mkdir(diphones_dir)\n\n wav = AudioSegment.from_file(wav_path)\n for (begin, end, diphone) in tg[u'phones']:\n diphone = diphone.strip().replace(\"-\", \"_\")\n if len(diphone) > 0 and diphone[0] != \".\":\n diphone_file = \"{}_{}.wav\".format(diphone, word)\n diphone_path = os.path.join(diphones_dir, diphone_file)\n\n # Works in milliseconds\n segment = wav[(begin * 1000):(end * 1000)]\n print(\"Saving {} ({} - {})\".format(diphone_path, begin, end))\n segment.export(diphone_path, format=\"wav\")\n elif diphone[0] == \".\":\n print(\"skipping {}\".format(diphone))", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def _get_spectrograms(self, index):\n file = self._waves[index]\n\n # get hyper-parameters\n hp = self.hparams\n\n w, _ = lr.load(file, sr=hp.sr)\n w, _ = lr.effects.trim(w) # triming\n\n linear = audio.wave2spec(w, hp)\n\n return linear, w", "def selectElements(self, f, elements):\n if isinstance(elements, types.StringTypes):\n m = self.elementIndex(elements)\n return f[m]\n if elements:\n fs = []\n k = 0\n for s in elements:\n k = self.elementIndex(s)\n fs.append(f[k])\n return asarray(fs)\n else:\n return asarray(f)", "def enframe(samples, winlen, winshift):\n\n # check if i+winlen > len(samples):\n\n result = []\n for i in range(0,len(samples),winshift):\n if(i+winlen > len(samples)): break\n result.append(samples[i:i+winlen])\n return np.array(result)\n # return np.array([samples[i:i+winlen] for i in range(0,len(samples),winshift)])", "def custom_sound(type_of, attack, decay, cutoff, coef, time, freq):\n dzw = np.zeros(time*44100)\n l=0\n for i in type_of:\n if i==\"sin\":\n dzw+= coef[l]*sin_custom(freq,time,attack[l],decay[l])\n if i==\"sq\":\n dzw+= coef[l]*sq_custom(freq,time,attack[l],decay[l])\n if i==\"saw\":\n dzw+= coef[l]*saw_custom(freq,time,attack[l],decay[l])\n l+=1 \n dzw[(1-cutoff)*time*44100 -1:]==0\n dzw = np.repeat(dzw,2).reshape(len(dzw),2)\n dzw = dzw/np.amax(dzw)\n return(dzw)", "def mels_to_audio(\n self, mels: np.ndarray, settings: typing.Optional[SettingsType] = None,\n ) -> np.ndarray:\n pass", "def preprocess_data(num_mfcc_coeffs, num_filters, window_len, window_step, max_num_frames):\n inputs = [] \n labels = [] \n \n SOURCE_DIR = '../data/cmu_arctic/scottish-english-male-awb/wav/' \n TARGET_DIR = 
'../data/cmu_arctic/us-english-male-bdl/wav/'\n index = 0\n for source_fname, target_fname in zip(os.listdir(SOURCE_DIR), os.listdir(TARGET_DIR)):\n if index >= 20:\n break\n index += 1\n\n if source_fname == '.DS_Store' or target_fname == '.DS_Store':\n continue\n\n (source_sample_rate, source_wav_data) = wav.read(SOURCE_DIR + source_fname) \n (target_sample_rate, target_wav_data) = wav.read(TARGET_DIR + target_fname)\n\n source_mfcc_features = np.array(mfcc(source_wav_data, samplerate=source_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n target_mfcc_features = np.array(mfcc(target_wav_data, samplerate=target_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n\n # align with FastDTW\n source_mfcc_features, target_mfcc_features = get_dtw_series(source_mfcc_features, target_mfcc_features)\n\n # pad MFCC feature matrices (rows) to max_num_frames\n source_padded_frames = pad_sequence(source_mfcc_features, max_num_frames)\n target_padded_frames = pad_sequence(target_mfcc_features, max_num_frames)\n\n inputs.append(source_padded_frames) \n labels.append(target_padded_frames) \n\n return inputs, labels", "def ccwt(self, data_arr):\n data_arr = np.asarray(data_arr, dtype=np.float32, order='C')\n check_audio(data_arr, is_mono=False)\n\n data_len = data_arr.shape[-1]\n win_len = self.fft_length // 4\n step = win_len * 2\n win_count = (data_len // step) - 1\n\n ret_arr = []\n for i in range(win_count):\n sample_arr = data_arr[..., i * step:i * step + self.fft_length]\n if sample_arr.shape[-1] != self.fft_length:\n break\n cur_spec_arr = self.cwt(sample_arr)\n\n start_idx = 0 if i == 0 else win_len\n end_idx = self.fft_length if i == (win_count - 1) else (win_len * 3)\n\n valid_spec_arr = cur_spec_arr[..., start_idx:end_idx]\n ret_arr.append(valid_spec_arr)\n\n ret_arr = np.concatenate(tuple(ret_arr), axis=-1)\n return ret_arr", "def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n f0 = pitch.get_value_at_time(time)\n f0_nan = 0\n if np.isnan(f0):\n f0 = 0\n f0_nan = 1\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([f0, f0_nan, int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def world_feature_extract(wav_list, args):\n # define feature extractor\n feature_extractor = FeatureExtractor(\n analyzer=\"world\",\n fs=args.fs,\n shiftms=args.shiftms,\n minf0=args.minf0,\n maxf0=args.maxf0,\n fftl=args.fftl)\n\n for i, wav_name in enumerate(wav_list):\n logging.info(\"now processing %s (%d/%d)\" % (wav_name, i + 1, len(wav_list)))\n\n # load wavfile and apply low cut filter\n fs, x = wavfile.read(wav_name)\n if x.dtype != np.int16:\n logging.warning(\"wav file format is not 16 bit PCM.\")\n x = np.array(x, dtype=np.float64)\n if args.highpass_cutoff != 0:\n x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)\n\n # check sampling frequency\n if not fs == args.fs:\n logging.error(\"sampling frequency is not matched.\")\n sys.exit(1)\n\n # extract features\n f0, _, _ = feature_extractor.analyze(x)\n uv, cont_f0 = convert_to_continuos_f0(f0)\n cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / 
(args.shiftms * 0.001)), cutoff=20)\n codeap = feature_extractor.codeap()\n mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)\n\n # concatenate\n cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)\n uv = np.expand_dims(uv, axis=-1)\n feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)\n\n # save to hdf5\n hdf5name = args.hdf5dir + \"/\" + os.path.basename(wav_name).replace(\".wav\", \".h5\")\n write_hdf5(hdf5name, \"/world\", feats)\n\n # overwrite wav file\n if args.highpass_cutoff != 0 and args.save_wav:\n wavfile.write(args.wavdir + \"/\" + os.path.basename(wav_name), fs, np.int16(x))", "def findwavelengthsolution(xarr, farr, sl, sf, ws, mdiff=20, wdiff=20, sigma=5,\n niter=5):\n # match up the features\n # xp, wp=findfeatures(xarr, farr, sl, sf, ws, mdiff=mdiff, wdiff=wdiff,\n # sigma=sigma, niter=niter)\n xp, wp = crosslinematch(xarr, farr, sl, sf, ws, mdiff=mdiff, wdiff=wdiff,\n sigma=sigma, niter=niter)\n\n # find the solution to the best fit\n mask = (wp > 0)\n if mask.sum() >= ws.order:\n nws = WavelengthSolution.WavelengthSolution(\n xp[mask], wp[mask], model=ws.model)\n nws.fit()\n else:\n nws = None\n # for i in range(len(xp)): print xp[i], wp[i], wp[i]-nws.value(xp[i])\n # print nws.sigma(xp,wp)\n return nws", "def get_strong_target(audio_name, strong_meta_dict, frames_num, \n frames_per_second, lb_to_idx):\n \n meta_list = strong_meta_dict[audio_name]\n \n target = np.zeros((frames_num, len(lb_to_idx)), dtype=np.bool)\n \n for meta in meta_list:\n begin_time = float(meta['begin_time']) \n begin_frame = int(round(begin_time * frames_per_second))\n end_time = float(meta['end_time'])\n end_frame = int(round(end_time * frames_per_second)) + 1\n label = meta['label']\n idx = lb_to_idx[label]\n \n target[begin_frame : end_frame, idx] = 1\n \n return target", "def getPulseWave(ham: Dict[str, Any], names: Union[str, List[str]]) -> Dict[str, Any]:\n if isinstance(names, str):\n return ham[\"control\"][names][\"waveforms\"]\n else:\n waves = {}\n for name in names:\n wave = ham[\"control\"][names][\"waveforms\"]\n waves[name] = wave\n return waves", "def select_one_chanel(data, ch_idx):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements[:,ch_idx])\r\n return np.array(result)", "def waves_to_data(self, waves):\n raise NotImplementedError", "def find_wavs(directory, pattern='**/*.wav'):\n return glob(os.path.join(directory, pattern), recursive=True)", "def wave_samples(self):\n return self._quantized_subsamples", "def get_audios_and_labels(data_dir: str) -> (List[Any], List[int]):\n test_dataset = get_dataset(data_dir)\n test_audios = []\n test_labels = []\n for audio, label in test_dataset:\n test_audios.append(audio.numpy())\n test_labels.append(label.numpy())\n test_audios = np.array(test_audios)\n test_labels = np.array(test_labels)\n return test_audios, test_labels", "def transcribe(self, paths2audio_files: List[str], batch_size: int = 4) -> List[str]:\n pass", "def play_sound(self, wavel, waver=None, samplefreq=44100, postduration = 0.05, attns=[20., 20.],\n isi=1.0, reps=1, storedata=True): \n if storedata:\n runmode = RZ5D_Run\n else:\n runmode = RZ5D_Preview\n # create an output waveform that has the stimulus repeated reps times with the selected ISI\n samplefreq = self.out_sampleFreq\n stimulus_duration = isi*reps # len(wavel)*samplefreq + postduration\n pts_per_rep = int(float(isi)*samplefreq)\n if wavel.shape[0] < pts_per_rep:\n wavel = np.concatenate((wavel, np.zeros(pts_per_rep-wavel.shape[0])), axis=0)\n wavel = 
np.tile(wavel, reps)\n if waver is not None:\n if waver.shape[0] < pts_per_rep:\n waver = np.concatenate((waver, np.zeros(pts_per_rep-waver.shape[0])), axis=0)\n waver = np.tile(waver, reps)\n \n \n # different approaches to playing out the sound for different hardware configuration:\n \n if 'pyaudio' in self.hardware:\n self.audio = pyaudio.PyAudio()\n chunk = 1024\n FORMAT = pyaudio.paFloat32\n CHANNELS = 2\n RATE = samplefreq\n if self.debugFlag:\n print (\"pysounds.play_sound: samplefreq: %f\" % (RATE))\n self.stream = self.audio.open(format = FORMAT,\n channels = CHANNELS,\n rate = int(RATE),\n output = True,\n input = True,\n frames_per_buffer = chunk)\n # play stream\n #print self.stream\n wave = np.zeros(2*len(wavel))\n if len(wavel) != len(waver):\n print (\"pysounds.play_sound: waves not matched in length: %d vs. %d (L,R)\" % (len(wavel), len(waver)))\n return\n (waver, clipr) = self.clip(waver, 20.0)\n (wavel, clipl) = self.clip(wavel, 20.0)\n wave[0::2] = waver \n wave[1::2] = wavel # order chosen so matches etymotic earphones on my macbookpro.\n postdur = int(float(postduration*self.in_sampleFreq))\n #rwave = read_array(len(wavel)+postdur, CHANNELS)\n write_array(self.stream, wave)\n self.stream.stop_stream()\n self.stream.close()\n self.audio.terminate()\n #self.ch1 = rwave[0::2]\n #self.ch2 = rwave[1::2]\n return\n \n if 'PA5' in self.hardware:\n self.setAttens(atten_left=attns)\n \n if 'RZ5D' in self.hardware:\n swcount = -1\n self.present_stim(wavel, isi, reps, runmode) # this sets up the NI card as well.\n deadmantimer = isi*(reps+1)+0.5 # just in case it doesn't stop as it should\n start_time = time.time() # deadman start time\n# print('done? ', self.RZ5D.GetTargetVal(self.RZ5D_ParTags['SweepDone']))\n while self.RZ5D.GetTargetVal(self.RZ5D_ParTags['SweepDone']) == 0: # wait for zSwDone to be set\n cs = self.RZ5D.GetTargetVal(self.RZ5D_ParTags['CurrentSweep'])\n if cs > swcount:\n # print(' Sweep = %d' % cs)\n swcount = swcount + 1\n time.sleep(0.1)\n elapsed_time = time.time() - start_time # elapsed time is in seconds\n if elapsed_time > deadmantimer:\n print('DeadmanExit')\n break\n self.RZ5D.SetSysMode(RZ5D_Standby) # was (RZ5D_Standby)\n self.task.stop()\n self.setAttens(atten_left=120)\n # self.present_stim(wavel, waver)\n \n if 'RP21' in self.hardware:\n # now take in some acquisition...\n a = self.RP21.ClearCOF()\n if a <= 0:\n print (\"pystim.playSound: Unable to clear RP2.1\")\n return\n a = self.RP21.LoadCOFsf(\"C:\\pyStartle\\startle2.rco\", self.samp_cof_flag)\n if a > 0 and self.debugFlag:\n print (\"pystim.playSound: Connected to TDT RP2.1 and startle2.rco is loaded\")\n else:\n print (\"pystim.playSound: Error loading startle2.rco?, error = %d\" % (a))\n return\n self.trueFreq = self.RP21.GetSFreq()\n Ndata = np.ceil(0.5*(stimulus_duration)*self.trueFreq)\n self.RP21.SetTagVal('REC_Size', Ndata) # old version using serbuf -- with\n # new version using SerialBuf, can't set data size - it is fixed.\n # however, old version could not read the data size tag value, so\n # could not determine when buffer was full/acquisition was done.\n \n if 'PA5' in self.hardware:\n self.setAttens(atten_left=attns[0], atten_right=attns[1]) # set equal, but not at minimum...\n\n self.task.start() # start the NI AO task\n \n a = self.RP21.Run() # start the RP2.1 processor...\n a = self.RP21.SoftTrg(1) # and trigger it. 
RP2.1 will in turn start the ni card\n \n while not self.task.isTaskDone(): # wait for AO to finish?\n self.RP21.Halt()\n if 'NIDAQ' in self.hardware:\n self.task.stop()\n return\n \n if 'PA5' in self.hardware:\n self.setAttens() # attenuators down (there is noise otherwise)\n # read the data...\n curindex1 = self.RP21.GetTagVal('Index1')\n curindex2 = self.RP21.GetTagVal('Index2')\n \n while(curindex1 < Ndata or curindex2 < Ndata): # wait for input data to be sampled\n self.RP21.Halt()\n return\n curindex1 = self.RP21.GetTagVal('Index1')\n curindex2 = self.RP21.GetTagVal('Index2')\n \n self.ch2 = self.RP21.ReadTagV('Data_out2', 0, Ndata)\n # ch2 = ch2 - mean(ch2[1:int(Ndata/20)]) # baseline: first 5% of trace\n self.ch1 = self.RP21.ReadTagV('Data_out1', 0, Ndata)\n self.RP21.Halt()", "def all(self, name, codes):\n if self.is_array(name):\n logics = []\n for s in self.sources(name):\n logics.append({s: has_all(codes)})\n slicer = self.take(intersection(logics))\n else:\n slicer = self.take({name: has_all(codes)})\n return slicer", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def benchmark_select_rle_conversions():\n import kwimage\n import ubelt as ub\n c_mask = kwimage.Mask.random(shape=(256, 256))\n f_mask = c_mask.to_fortran_mask(copy=True)\n\n img = c_mask.data\n\n ti = ub.Timerit(1000, bestof=50, verbose=1)\n\n for timer in ti.reset('img -> encode_run_length(non-binary)'):\n with timer:\n kwimage.encode_run_length(img, binary=False)\n\n for timer in ti.reset('img -> encode_run_length(binary)'):\n with timer:\n kwimage.encode_run_length(img, binary=True)\n\n for timer in ti.reset('c_mask -> to_array_rle'):\n with timer:\n c_mask.to_array_rle()\n\n for timer in ti.reset('c_mask -> to_bytes_rle'):\n with timer:\n c_mask.to_bytes_rle()\n\n for timer in ti.reset('f_mask -> to_array_rle'):\n with timer:\n f_mask.to_array_rle()\n\n for timer in ti.reset('f_mask -> to_bytes_rle'):\n with timer:\n f_mask.to_bytes_rle()", "def analyze2(ys, freqs, ts):", "def fnutofwave(warr, farr):\n c= 2.99792458e18 #spped of light in Angstroms/s\n return farr*c/warr**2", "def get_wav(in_channels, pool=True):\n harr_wav_L = 1 / np.sqrt(2) * np.ones((1, 2))\n harr_wav_H = 1 / np.sqrt(2) * np.ones((1, 2))\n harr_wav_H[0, 0] = -1 * harr_wav_H[0, 0]\n\n harr_wav_LL = np.transpose(harr_wav_L) * harr_wav_L\n harr_wav_LH = np.transpose(harr_wav_L) * harr_wav_H\n harr_wav_HL = np.transpose(harr_wav_H) * harr_wav_L\n harr_wav_HH = np.transpose(harr_wav_H) * harr_wav_H\n\n filter_LL = torch.from_numpy(harr_wav_LL).unsqueeze(0)\n filter_LH = torch.from_numpy(harr_wav_LH).unsqueeze(0)\n filter_HL = torch.from_numpy(harr_wav_HL).unsqueeze(0)\n filter_HH = torch.from_numpy(harr_wav_HH).unsqueeze(0)\n\n if pool:\n net = nn.Conv2d\n else:\n net = nn.ConvTranspose2d\n\n LL = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n LH = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n HL = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n HH = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n\n LL.weight.requires_grad = False\n LH.weight.requires_grad = False\n HL.weight.requires_grad = False\n HH.weight.requires_grad = False\n\n LL.weight.data = filter_LL.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n LH.weight.data = 
filter_LH.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n HL.weight.data = filter_HL.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n HH.weight.data = filter_HH.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n\n return LL, LH, HL, HH", "def get_wav(in_channels, pool=True):\n harr_wav_L = 1 / np.sqrt(2) * np.ones((1, 2))\n harr_wav_H = 1 / np.sqrt(2) * np.ones((1, 2))\n harr_wav_H[0, 0] = -1 * harr_wav_H[0, 0]\n\n harr_wav_LL = np.transpose(harr_wav_L) * harr_wav_L\n harr_wav_LH = np.transpose(harr_wav_L) * harr_wav_H\n harr_wav_HL = np.transpose(harr_wav_H) * harr_wav_L\n harr_wav_HH = np.transpose(harr_wav_H) * harr_wav_H\n\n filter_LL = torch.from_numpy(harr_wav_LL).unsqueeze(0)\n filter_LH = torch.from_numpy(harr_wav_LH).unsqueeze(0)\n filter_HL = torch.from_numpy(harr_wav_HL).unsqueeze(0)\n filter_HH = torch.from_numpy(harr_wav_HH).unsqueeze(0)\n\n if pool:\n net = nn.Conv2d\n else:\n net = nn.ConvTranspose2d\n\n LL = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n LH = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n HL = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n HH = net(in_channels, in_channels,\n kernel_size=2, stride=2, padding=0, bias=False,\n groups=in_channels)\n\n LL.weight.requires_grad = False\n LH.weight.requires_grad = False\n HL.weight.requires_grad = False\n HH.weight.requires_grad = False\n\n LL.weight.data = filter_LL.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n LH.weight.data = filter_LH.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n HL.weight.data = filter_HL.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n HH.weight.data = filter_HH.float().unsqueeze(0).expand(in_channels, -1, -1, -1)\n\n return LL, LH, HL, HH", "def __call__(self, waveforms, telid, selected_gain_channel):", "def slice_vec_bands(doc_vectors, start=0, end=None,\n ndims=300, drop_imag=False):\n return numpy.array([\n flatten_fft(unflatten_vec(dv, ndims=ndims), start, end, drop_imag)\n for dv in doc_vectors\n ])", "def get_unit_spectrograms(\n spectrogram: np.ndarray,\n onsets: np.ndarray,\n offsets: np.ndarray,\n sr: int = 22050,\n hop_length: int = 512,\n) -> np.ndarray:\n\n units = []\n # Seconds to frames\n onsets = onsets * sr / hop_length\n offsets = offsets * sr / hop_length\n for on, off in zip(onsets.astype(np.int32), offsets.astype(np.int32)):\n unit = spectrogram[:, on:off]\n units.append(unit)\n\n return units", "def getWaveform(self, ch=\"CH1\", samples=2500):\n\t\tself.isReady()\n\t\tcounter = 1\n\t\twhile True:\n\t\t\ttry:\t\t\n\t\t\t\twaveform = self.osc.get_waveform(source = ch, start = 1, stop = samples)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint(\"Retry: \" + str(counter))\n\t\t\t\tcounter += 1\n\t\ty_array = []\n\t\tfor x,y in waveform:\n\t\t\ty_array.append(y)\n\t\treturn y_array", "def waves(x, /, wavenums=None, wavelengths=None, phase=None, state=None):\n # Wavelengths\n if wavenums is None and wavelengths is None:\n raise ValueError('Must declare wavenums or wavelengths.')\n elif wavelengths is not None:\n wavenums = 1.0 / np.atleast_1d(wavelengths)\n wavenums = np.atleast_1d(wavenums)\n if np.isscalar(x):\n x = np.arange(x)\n data = np.zeros(x.shape) # user can make N-D array\n\n # Get waves\n if state is None:\n state = np.random\n if phase is None:\n phis = state.uniform(0, 2 * np.pi, len(wavenums))\n else:\n phis = phase * np.ones((len(wavenums),))\n for 
wavenum, phi in zip(wavenums, phis):\n data += np.sin(2 * np.pi * wavenum * x + phi)\n return data", "def getWaveSamples( self, positions ):\n\t\tif type(positions) == type(10):\n\t\t\tself.waveread.setpos(positions)\n\t\t\tif positions < 0 or positions > len(self.data)-1 :\n\t\t\t\treturn 0.5\n\t\t\t\treturn getWaveSample( positions )\n\n\t\telif type(positions) == type([1,]):\n\t\t\tsmps = []\n\t\t\tfor i in positions:\n\t\t\t\tif i < 0 or i > len(self.data)-1:\n\t\t\t\t\tsmps.append(0.5)\n\t\t\t\telse:\n\t\t\t\t\tsmps.append(self.getWaveSample( i ) )\n\t\t\treturn smps", "def _getWavelet(self, ch='dos1rate', thresh=0.1, maxWidth=1, SIGNIF_LEVEL=0.25):\n # Feed the counts into the wavelet microburst finder\n validDataIdt = np.where(self.d[ch] != -1E31)[0]\n waveletAnalysis.WaveletDetector.__init__(self, self.d[ch][validDataIdt], \n self.d['dateTime'][validDataIdt], 0.1, mother='DOG', siglvl=0.95)\n self.waveletTransform() # Get wavelet space\n self.waveletFilter(self.s0, maxWidth, SIGNIF_LEVEL=SIGNIF_LEVEL) # Do a band pass and significance filter.\n self.degenerateInvWaveletTransform() # Inverse transform filtered data.\n # Indicies where the error-filetered data is greater than thresh\n self.burstIdt = np.where(self.dataFlt > thresh)[0] \n self._getPeaks(ch, validDataIdt) # Find peaks\n return", "def wavematch(w, wp, sl, wlimit=10):\n\n # first remove anything already in the self.wp from the sl list\n lines = []\n for x in sl:\n if x not in wp:\n lines.append(x)\n if not lines:\n return -1\n lines = np.array(lines)\n\n # find the best match\n dist = abs(lines - w)\n if dist.min() < wlimit:\n i = dist.argmin()\n else:\n return -1\n\n # return the values\n return lines[i]", "def verb(filename,l,t,d,wout=True): #l = predelay d= decay smaller = less decay, t= number of delays\n#low l turns into chorus\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n data_ex=np.zeros(((n+l*t),ch))\n data_ex[0:n,:]=data\n data_Rex=np.zeros((len(data_ex),t,ch))\n print('Applying reverb...')\n for k in range (ch):\n for i in range (len(data)):\n for j in range(t):\n data_Rex[i+l*(j+1),j,k]=data_ex[i,k]*np.exp(-d*(j+1))\n data_F=data_ex\n print('Mixing...')\n for i in range (t):\n data_F=data_F+1*data_Rex[:,i,:]\n data_F=1*data_F\n data_verb=data_F+data_ex\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_verbed.wav',data_verb,sr,'PCM_16')\n print('Done!')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.')\n return data_verb", "def subsample(cfg, poses, targets, window_width=90, overlap=0.5):\n joint_len = cfg['num_of_joints']\n poses = np.reshape(poses, (poses.shape[0], poses.shape[1], joint_len, 2))\n \n ret_pose = []\n ret_target = []\n \n # iterate poses\n for i in range(len(poses)):\n step = int(-window_width*overlap) \n for ss_stop in range(poses.shape[1], 0, step): \n if ss_stop >= window_width:\n ss = poses[i,ss_stop - window_width:ss_stop]\n ret_pose.append(ss)\n ret_target.append(targets[i]) \n \n poses = np.reshape(ret_pose, (np.array(ret_pose).shape[0], np.array(ret_pose).shape[1],\n joint_len * 2))\n\n return np.array(poses), np.array(ret_target)", "def from_file_list(\n cls,\n audio_file_list,\n target_sr=None,\n int_values=False,\n offset=0,\n duration=0,\n trim=False,\n channel_selector=None,\n *args,\n **kwargs,\n ):\n if isinstance(channel_selector, int):\n # Shortcut when selecting a single channel\n if channel_selector >= len(audio_file_list):\n 
raise RuntimeError(\n f'Channel cannot be selected: channel_selector={channel_selector}, num_audio_files={len(audio_file_list)}'\n )\n # Select only a single file\n audio_file_list = [audio_file_list[channel_selector]]\n # Reset the channel selector since we applied it here\n channel_selector = None\n\n samples = None\n\n for a_file in audio_file_list:\n # Load audio from the current file\n a_segment = cls.from_file(\n a_file,\n target_sr=target_sr,\n int_values=int_values,\n offset=offset,\n duration=duration,\n channel_selector=None,\n trim=False, # Do not apply trim to individual files, it will be applied to the concatenated signal\n *args,\n **kwargs,\n )\n\n # Only single-channel individual files are supported for now\n if a_segment.num_channels != 1:\n raise RuntimeError(\n f'Expecting a single-channel audio signal, but loaded {a_segment.num_channels} channels from file {a_file}'\n )\n\n if target_sr is None:\n # All files need to be loaded with the same sample rate\n target_sr = a_segment.sample_rate\n\n # Concatenate samples\n a_samples = a_segment.samples[:, None]\n\n if samples is None:\n samples = a_samples\n else:\n # Check the dimensions match\n if len(a_samples) != len(samples):\n raise RuntimeError(\n f'Loaded samples need to have identical length: {a_samples.shape} != {samples.shape}'\n )\n\n # Concatenate along channel dimension\n samples = np.concatenate([samples, a_samples], axis=1)\n\n # Final setup for class initialization\n samples = np.squeeze(samples)\n sample_rate = target_sr\n\n return cls(\n samples, sample_rate, target_sr=target_sr, trim=trim, channel_selector=channel_selector, *args, **kwargs,\n )", "def bessel_wave(freq, alpha, beta):\n return FMFilter(SineWave(freq), SineWave(alpha), beta)", "def waveforms(self):\n return list(self._waveforms)", "def getSamples(self, section, pitch, target=\"beats\"):\n sample_list = audio.AudioQuantumList()\n if target == \"beats\":\n sample_list.extend([b for x in section.children() for b in x.children()]);\n elif target == \"bars\":\n sample_list.extend(section.children())\n return sample_list.that(overlap_ends_of(self.original.analysis.segments.that(have_pitch_max(pitch)).that(overlap_starts_of(sample_list))))", "def torch_big_sample(array, indexes, desired_shape):\n torch_arr = torch.tensor(array, dtype=torch.float32)\n indexed = torch_arr[[indexes[0], indexes[1]]]\n return indexed.reshape(desired_shape)\n #chunked = torch.chunk(indexed, desired_shape[0])\n #chunked = [chunk.reshape(desired_shape[1:]) for chunk in chunked]\n #out = torch.stack(chunked)", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num):\r\n Sxx = spectrogram_converter.mel_spectrogram(wav_path)\r\n for i in range(Sxx.shape[0]):\r\n for j in range(Sxx.shape[1]):\r\n X[index, 0, i, j] = Sxx[i, j]\r\n y[index] = curr_speaker_num\r\n return 1", "def kmer_seq_to_filters(kmers):\n\treturn np.concatenate([dna_string_to_array(s) for s in kmers])", "def get_words(self, indices):\n return [self.get_word(index) for index in indices]", "def mic_audio(dur):\n\n audio,b = microphone.record_audio(dur)\n audio = np.hstack([np.frombuffer(i,np.int16) for i in audio])\n return audio", "def wav(self, data_type):\n return sorted(self._wav_paths[data_type])", "def load_wave_np(self):\r\n self.wavenpfileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Single File', 'M:/tnw/ist/do/projects/Neurophotonics/Brinkslab/Data',\"(*.npy)\") \r\n \r\n temp_loaded_container = np.load(self.wavenpfileName, allow_pickle=True)\r\n\r\n try:\r\n self.uiDaq_sample_rate 
= int(os.path.split(self.wavenpfileName)[1][20:-4])\r\n except:\r\n try:\r\n self.uiDaq_sample_rate = int(float(self.wavenpfileName[self.wavenpfileName.find('sr_')+3:-4])) #Locate sr_ in the file name to get sampling rate.\r\n except:\r\n self.uiDaq_sample_rate = 50000\r\n \r\n if self.uiDaq_sample_rate != int(self.SamplingRateTextbox.value()):\r\n print('ERROR: Sampling rates is different!')\r\n \r\n self.PlotDataItem_dict = {}\r\n self.waveform_data_dict = {}\r\n \r\n for i in range(len(temp_loaded_container)):\r\n \r\n channel_keyword = temp_loaded_container[i]['Sepcification']\r\n \r\n if channel_keyword != \"galvos_X_contour\" and channel_keyword != \"galvos_Y_contour\":\r\n self.waveform_data_dict[channel_keyword] = temp_loaded_container[i]['Waveform']\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword])", "def spectral_data(spectra):\n weights = np.concatenate([ s.ivar for s in spectra ])\n flux = np.concatenate([ s.flux for s in spectra ])\n wflux = weights * flux\n return (weights, flux, wflux)", "def spectral_whitening(arr, delta, freq_width, returnweight=False):\n npts = len(arr)\n carr = np.fft.fftshift(np.fft.fft(arr, 2 * npts))\n\n Nyfreq = 0.5 / delta\n spec_step = Nyfreq / npts\n if freq_width != None:\n winlen = int(0.5 * freq_width / spec_step) * 2 + 1\n weight = smooth_avg(np.abs(carr), winlen)\n if any(weight < 1e-8): raise Exception('Zero division')\n \n carr /= weight\n carr[weight<1e-8] = 0\n\n if returnweight:\n return carr[npts-1:2*npts], weight[npts-1:2*npts]\n else:\n return carr[npts-1:2*npts]", "def get_selected_muons(muons, trigobj, mask_events, mu_pt_cut_leading, mu_pt_cut_subleading, mu_aeta_cut, mu_iso_cut): \n passes_iso = muons.pfRelIso04_all < mu_iso_cut\n passes_id = muons.mediumId == 1\n passes_subleading_pt = muons.pt > mu_pt_cut_subleading\n passes_leading_pt = muons.pt > mu_pt_cut_leading\n passes_aeta = NUMPY_LIB.abs(muons.eta) < mu_aeta_cut\n \n trigobj.masks[\"mu\"] = (trigobj.id == 13)\n \n muons_matched_to_trigobj = NUMPY_LIB.invert(mask_deltar_first(muons, muons.masks[\"all\"], trigobj, trigobj.masks[\"mu\"], 0.1))\n \n #select muons that pass these cuts\n muons_passing_id = passes_iso & passes_id & passes_subleading_pt & muons_matched_to_trigobj\n \n #select events that have muons passing cuts \n events_passes_muid = sum_in_offsets(muons, muons_passing_id, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n events_passes_leading_pt = sum_in_offsets(muons, muons_passing_id & passes_leading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 1\n events_passes_subleading_pt = sum_in_offsets(muons, muons_passing_id & passes_subleading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n\n base_event_sel = mask_events & events_passes_muid & events_passes_leading_pt & events_passes_subleading_pt\n \n muons_passing_os = select_muons_opposite_sign(muons, muons_passing_id & passes_subleading_pt)\n events_passes_os = sum_in_offsets(muons, muons_passing_os, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) == 2\n \n final_event_sel = base_event_sel & events_passes_os\n final_muon_sel = muons_passing_id & passes_subleading_pt & muons_passing_os\n \n return {\n \"selected_events\": final_event_sel,\n \"selected_muons\": final_muon_sel,\n }", "def build_pulse_waveform(startper,endper):\r\n mywaveform = numpy.zeros(100, dtype=numpy.int)\r\n if startper > endper:\r\n mywaveform[0:endper]=1\r\n mywaveform[startper:100]=1\r\n else:\r\n mywaveform[startper:endper]=1 \r\n return mywaveform" ]
[ "0.7074208", "0.54926926", "0.543694", "0.5338126", "0.5282017", "0.5270855", "0.51453614", "0.5138771", "0.5112849", "0.5108346", "0.5105792", "0.5059946", "0.5033493", "0.5002626", "0.49823514", "0.49728918", "0.4955809", "0.4946704", "0.49403378", "0.49394882", "0.48933354", "0.48768497", "0.48729745", "0.48646903", "0.4860009", "0.4858704", "0.48454726", "0.48243514", "0.48228773", "0.48178145", "0.48176628", "0.48168096", "0.48167536", "0.4816096", "0.4811237", "0.48087183", "0.4808249", "0.47992718", "0.47924012", "0.4788218", "0.47820437", "0.47795704", "0.47788912", "0.47788912", "0.47775778", "0.47642893", "0.47489733", "0.47402045", "0.47305968", "0.47276968", "0.47275937", "0.47183967", "0.47156626", "0.47142103", "0.47127393", "0.47038975", "0.47007382", "0.47002295", "0.46897975", "0.46842617", "0.468365", "0.46805263", "0.46804732", "0.4675905", "0.46733156", "0.4669073", "0.46591702", "0.4658228", "0.4654912", "0.46544585", "0.46526328", "0.46440256", "0.46354464", "0.4633592", "0.4633592", "0.4630812", "0.46270922", "0.46233037", "0.46134466", "0.4612493", "0.4604562", "0.46022904", "0.4601441", "0.46011215", "0.46008673", "0.45990682", "0.45975575", "0.45962563", "0.45890746", "0.45874393", "0.45840722", "0.4581612", "0.45809177", "0.45798028", "0.45793006", "0.45780897", "0.45723814", "0.45646566", "0.45624813", "0.45600298" ]
0.70528
1
Gaussian_function of area=1 p[0] = A; p[1] = mean; p[2] = FWHM;
def unitary_Gauss(x, center, FWHM):
    sigma = np.abs(FWHM) / (2 * np.sqrt(2 * np.log(2)))
    Amp = 1.0 / (sigma * np.sqrt(2 * np.pi))
    tau = -((x - center)**2) / (2 * (sigma**2))
    result = Amp * np.exp(tau)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian(amp, fwhm, mean):\n return lambda x: amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)", "def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def area_of_gaussian(amp, fwhm):\n return amp * fwhm / 0.93943727869965132", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def gaussian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * np.exp(-4 * np.log(2) * ((x - x0) / fwhm) ** 2)", "def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2", "def funcG(p, x):\n A, mu, sigma, zerolev = p\n return( A * numpy.exp(-(x-mu)*(x-mu)/(2*sigma*sigma)) + zerolev )", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def Gaussian(x, mu, sigma, a):\n amplitude = a / ( sigma * np.sqrt(2 * np.pi) )\n u = (x - mu) / sigma\n return amplitude * np.exp( -0.5 * (u**2) )", "def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def fspecial_gaussian(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def gaussianPSF(shape, sigma):\n psf = dg.drawGaussiansXY(shape,\n numpy.array([0.5*shape[0]]),\n numpy.array([0.5*shape[1]]),\n sigma = sigma)\n return psf/numpy.sum(psf)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(p, x):\n #2008-09-11 15:11 IJC: Created for LINEPROFILE\n # 2011-05-18 11:46 IJC: Moved to analysis.\n # 2013-04-11 12:03 IJMC: Tried to speed things up slightly via copy=False\n # 2013-05-06 21:42 IJMC: Tried to speed things up a little more.\n\n if not isinstance(x, np.ndarray):\n x = array(x, dtype=float, copy=False)\n\n if len(p)==3:\n p = array(p, copy=True)\n p = concatenate((p, [0]))\n #elif len(p)==4:\n # p = array(p, copy=False)\n\n return p[3] + p[0]/(p[1]*sqrt(2*pi)) * exp(-(x-p[2])**2 / (2*p[1]**2))", "def normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x /= x.sum()\n return x", "def calculateGaussian(x, mean, stdev):\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)", "def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2", "def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)))", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. 
* sigma ** 2))", "def gaussian(var):\n stddev = np.sqrt(var)\n return stats.norm(0, stddev)", "def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", "def fitgaussian(self, data):\n params = self.moments(data)\n errorfunction = lambda p: ravel(self.Gauss(*p)(*indices(data.shape)) - data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def doubleGaussian(p, x):\n # 2013-05-06 20:29 IJMC: Created\n\n x = array(x, dtype=float, copy=False)\n return gaussian(p[0:3], x) + gaussian(p[3:], x)", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))", "def mean_sigma(h):\n h.Fit(\"gaus\", \"q\")\n result_fit = h.GetFunction(\"gaus\")\n mean = result_fit.GetParameter(1)\n sigma = result_fit.GetParameter(2)\n return mean, sigma", "def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def doubleGaussian(x, m1, s1, a1, m2, s2, a2):\n # primary peak\n g1 = np.exp(-0.5*((x-m1)/s1)**2)\n # secondary peak\n g2 = np.exp(-0.5*((x-m2)/s2)**2)\n # total model\n mod1 = 1 - a1 * g1\n mod2 = 1 - a2 * g2\n modt = mod1 + mod2 - 1\n return modt", "def gaussian(\n self,\n width=None,\n mfreq=None,\n chromaticity=None,\n dtype=None,\n power=True,\n ):\n widths, dtype = self._process_args(width, mfreq, chromaticity, dtype)\n response = np.exp(-0.5 * (self.xs / np.sin(widths)) ** 2)\n if power:\n response = response ** 2\n return response.astype(dtype)", "def gaussianDist(self, x, mu, var):\n val = 1/(math.sqrt(2 * math.pi * var)) * math.exp(-1 * (x - mu)**2 / (2*var))\n return val", "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)", "def onedgauss(x,H,A,dx,w):\n #H,A,dx,w = params\n return H+A*np.exp(-(x-dx)**2/(2*w**2))", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def test_gaussian():\n x0 = Parameter('x0')\n sig = Parameter('sig', positive=True)\n x = Variable('x')\n\n new = sympy.exp(-(x - x0)**2/(2*sig**2))/sympy.sqrt((2*sympy.pi*sig**2))\n assert isinstance(new, sympy.Expr)\n g = Gaussian(x, x0, sig)\n assert issubclass(g.__class__, sympy.Expr)\n assert new == g\n\n # A pdf should always integrate to 1 on its domain\n assert sympy.integrate(g, (x, -sympy.oo, sympy.oo)) == 1", "def 
__bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def pdf(x):\n return - np.exp(self.ks_gaussian.score_samples(x.reshape(1, -1)))", "def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2", "def fun_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2))", "def gaussian1d(x, x0, w0, A, offset):\n if w0 == 0:\n return 0\n return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset", "def gauss_pert(N,a):\n x = np.arange(0,N,1,float)\n y = x[:,np.newaxis]\n\n # Choose the location of the peak\n x0 = y0 = int(0.4*N)\n\n # Choose the fwhm, 'width' of the perturbation\n fwhm = N/15\n\n return a*np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def multivariate_gauss_prob(observed, mean, covariance):\n\n return None", "def flat_top_gaussian(a1_val, a2_val, sigma1, sigma2, w1_val, w2_val, w_val):\n gauss1 = a1_val * np.exp(-(w_val - w1_val)**4/(2 * sigma1**2))\n gauss2 = a2_val * np.exp(-(w_val - w2_val)**4/(2 * sigma2**4))\n sum_gauss = gauss1 + gauss2\n return sum_gauss", "def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p", "def reduced_normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x[mean] = 0.\n x /= x.sum()\n return x", "def doubleGaussianCen(p, x, mu1, mu2):\n # 2013-05-06 20:29 IJMC: Created\n\n x = array(x, dtype=float, copy=False)\n param1 = [p[0], p[1], mu1, 0]\n if len(p)==4:\n param2 = [p[2], p[3], mu2, 0]\n elif len(p)==5:\n param2 = [p[2], p[3], mu2, p[4]]\n\n return gaussian(param1, x) + gaussian(param2, x)", "def pvalue_gaussian(self):\n \n pv = 2 * stats.norm.sf(abs(self.TS_prime_obs), loc=0, scale=1)\n return(pv)", "def gaussian_filter(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma))\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def func_gaussian(self, dmv, vpar):\n dmoff = dmv - vpar[0]\n sig = vpar[1]\n sig = sig * sig\n return np.exp(-0.5 * dmoff * dmoff / sig) * self.ThetaFunc(dmv)", "def gaussianOne(self, x=np.array([]), mu=0., sig=1.):\n\n lnConst = -0.5 * np.log(2.0 * np.pi)\n lnVar = -np.log(sig)\n lnBrack = -0.5 * ((x - mu) / sig)**2\n\n ### safety valve\n ###bBad = np.abs(lnBrack) > 7.\n ###lnBrack[bBad] = -np.inf\n\n return lnConst + lnVar + lnBrack", "def gauss(self, X, xm, amp, w):\n return amp * np.exp(-((X - xm) / w) ** 2)", "def agauss(self, X, xm, amp, w, a):\n # w(x) = 2 * w / (1 + np.exp(a * (X - xm)))\n return amp * np.exp(-((X - xm) / (2 * w / (1 + np.exp(a * (X - xm))))) ** 2)", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r", "def gaussian(t, params):\n DeprecationWarning(\"Using standard width. 
Better use gaussian_sigma.\")\n params['sigma'] = Qty(\n value=params['t_final'].get_value()/6,\n min_val=params['t_final'].get_value()/8,\n max_val=params['t_final'].get_value()/4,\n unit=params['t_final'].unit\n )\n return gaussian_sigma(t, params)", "def test_gaussian_basis_hon(self):\n def row_generator():\n return [random.gauss(0, 1) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)", "def makeGaussian(height, width, sigma=3, center=None):\n x = np.arange(0, width, 1, float)\n y = np.arange(0, height, 1, float)[:, np.newaxis]\n if center is None:\n x0 = width // 2\n y0 = height // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def multivariateGaussian(X, mu, Sigma2):\n k = mu.shape[0]\n\n if Sigma2.shape[1] == 1 or Sigma2.shape[0] == 1:\n Sigma2 = np.diag(Sigma2[:, 0])\n\n X = (X-mu.T).copy()\n p = (2*np.pi)**(-k/2)*np.linalg.det(Sigma2)**-0.5\n p = p*np.exp(-0.5*(X.dot(np.linalg.pinv(Sigma2))*X).sum(1, keepdims=True))\n return p", "def gaussianNormalised(data, mu, sigma):\n data = data - mu\n g = exp ( - data**2 / (2*sigma**2) )\n gSum = np.sum(g)\n \n if gSum == 0:\n print \"Warning gaussianNormalised:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g)\n else:\n return (g / gSum)", "def fun_gauss_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2)) + p[3] * np.exp(-((r/p[2])**2))", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def f1_post_mean(self,f1,n_samp=5000):\n assert len(f1.shape)==1, 'input must be 1d ndarray'\n n_trial = len(f1)\n # Takes 1d array as input\n # sensory noise\n f1_ = np.tile(f1,(n_samp,1)) + self.s_s*np.random.randn(n_samp,n_trial)\n s = (1./self.s_g**2 + 1./self.s_s**2)**(-1./2.)\n mu = s**2*(self.mu_g/self.s_g**2 + f1_/self.s_s**2)\n if self.h == 0:\n return np.mean(mu,axis=0)\n else:\n # posterior inference\n #k = 1./np.sqrt(2.*np.pi)*s/(self.s_g*self.s_s)\\\n # *np.exp(-0.5*(self.mu_g**2/self.s_g**2+f1_**2/self.s_s**2-mu**2/s**2))\n k_g = s/self.s_s\\\n *np.exp(-0.5*(self.mu_g**2/self.s_g**2+f1_**2/self.s_s**2-mu**2/s**2)) # for unnormalized gaussian prior\n pi_u = self.h/(self.h+k_g)\n pi_g = 1.-pi_u\n\n return np.mean(pi_u*f1_+pi_g*mu,axis=0)", "def gaussian_template(\n wavelengths: np.ndarray,\n mean: Union[float, np.ndarray],\n std: Union[float, np.ndarray] = 30.0,\n) -> np.ndarray:\n y = norm.pdf(wavelengths, mean, std)\n return y / np.max(y, axis=-1, keepdims=True)", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def gaussian(gp_link=None, variance=2, D=None, N=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Identity()\r\n analytical_mean = True\r\n analytical_variance = True # ?\r\n return noise_models.gaussian_noise.Gaussian(gp_link, analytical_mean,\r\n analytical_variance, variance=variance, D=D, N=N)", "def _FSpecialGauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) 
== size\n g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))\n return g / g.sum()", "def gaussian(T, Y, X, t, y, x, sigma, sigma_t=1):\n const_value = np.sqrt(2 * np.pi * sigma) ** 3\n norm = np.exp(\n -(\n ((X - x) ** 2) / (2 * sigma ** 2)\n + ((Y - y) ** 2) / (2 * sigma ** 2)\n + ((T - t) ** 2) / (2 * sigma_t ** 2)\n )\n )\n return norm / const_value", "def gauss2(x,a1,c1,w1,a2,c2,w2):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)", "def gaussian_filter(x):\n return _gaussian_filter(x, 3)", "def gaussian_likelihood(input_, mu_, log_std):\n pre_sum = -0.5 * (((input_ - mu_) / (\n tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(\n 2 * np.pi))\n return tf.reduce_sum(pre_sum, axis=1)", "def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def gauss3(x,a1,c1,w1,a2,c2,w2,a3,c3,w3):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)+gaussian(x,a3,c3,w3)", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def Gaussian(x, t, sigma):\n return np.exp(-(x - t)**2 / (2 * sigma**2))", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def gaussian(z, mean=0, stdev=None, sigma=1):\n sigma = stdev if stdev is not None else sigma\n norm = stats.norm(loc=mean, scale=sigma)\n pdf = norm.pdf(z, loc=mean, scale=sigma)\n return z, pdf", "def fun_exp_p_gauss(p,r):\n return p[1] * np.exp(-((r**2/p[0]))) + p[3] * np.exp(-((np.abs(r)/p[2])))", "def phi_gauss(self,x,i):\n s = 0.1\n return np.exp(-(x-self.mu[i])**2/(2*s))", "def gaussian(x, peak_x=.0, sigma=1.0, name=''):\n x = x.astype(np.float)\n variables = {'function': gaussian, 'peak_x': peak_x, 'sigma': sigma}\n y = np.exp((-1 * (x - peak_x)**2) / (2 * sigma**2))\n return packer(x, y, variables, name=name)", "def gaussian(self, amp_step, sigma_step):\n l = len(self.overlaid_x_axis)\n x = np.linspace(0, l, l) - l/2 # centre of data\n\n # This is new code to 'guess' the size of the Gaussian from the\n # existing data rather than from hard-coded numbers.\n # TODO: test this! 
Possibly link up to the get_windowed_data function\n # as it uses a lot of the same functionality\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n amplitude = max(trace) + amp_step\n diff = np.diff(trigger)\n stepvalue = 0.5\n if min(diff) > -1 * stepvalue or max(diff) < stepvalue:\n raise RangeError\n else:\n maxtrig = next(x for x in diff if x > stepvalue)\n mintrig = next(x for x in diff if x < -1 * stepvalue)\n edges = [np.where(diff == maxtrig)[0][0],\n np.where(diff == mintrig)[0][0]]\n half_trigger_length = (edges[1]-edges[0])\n sigma = half_trigger_length/4 + sigma_step\n\n gauss = self.ax2.plot(amplitude * np.exp(-x**2 / (2 * sigma**2)), 'r')\n self.overlaid_lines.append(gauss)\n self.draw()", "def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)", "def makeGaussian(size, fwhm, sigma, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n \n #return (np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)) #approximation using exponenial functions\n return ((1/(2*np.pi*sigma**2))*np.exp(-((xx)**2 + (yy)**2)/(2*sigma**2))) # symmetric 2D Gaussian distribution", "def FWHM(self):\n # The width of the Lorentz profile\n fl = 2.0 * self[\"al\"]\n # Width of the Gaussian [2.35 = 2*sigma*sqrt(2*ln(2))]\n fd = 2.35482 * self['ad']\n return 0.5346 * fl + numpy.sqrt(0.2166 * (fl**2.) + fd**2.)", "def mean(self):\n return math.exp(self.mu + (self.sigma ** 2) / 2)", "def makeGaussian(size, fwhm, center=None):\n\n x = sp.arange(0, size, 1, float)\n y = x[:,sp.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return sp.exp(-4*sp.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])" ]
[ "0.7608028", "0.75165457", "0.6953791", "0.69522965", "0.6922625", "0.6717248", "0.6687247", "0.6661979", "0.66529137", "0.66487074", "0.6572644", "0.65556943", "0.65399146", "0.6530924", "0.6530924", "0.6530924", "0.6530346", "0.6524894", "0.65182394", "0.64871186", "0.64804256", "0.6475199", "0.6472436", "0.6467419", "0.64448726", "0.6431542", "0.6420231", "0.64110774", "0.6362523", "0.6347153", "0.63392013", "0.6339054", "0.63125265", "0.6252101", "0.6246736", "0.62396145", "0.62258744", "0.6221435", "0.6220813", "0.62103575", "0.6198822", "0.6157173", "0.61568046", "0.61559165", "0.6139995", "0.61280453", "0.6128008", "0.61277115", "0.61155576", "0.6112512", "0.6093256", "0.6075634", "0.60719746", "0.60677475", "0.6065964", "0.6051974", "0.60329515", "0.60187125", "0.6005329", "0.59964985", "0.59908867", "0.598963", "0.59788936", "0.59758973", "0.5975166", "0.5964615", "0.5943387", "0.59423745", "0.5938948", "0.59327227", "0.5911756", "0.59109664", "0.59023666", "0.5898293", "0.5885649", "0.5876881", "0.58735126", "0.58724046", "0.5856644", "0.58565784", "0.58555096", "0.58340204", "0.5832975", "0.5831584", "0.58295786", "0.5827117", "0.5824827", "0.5815148", "0.5814065", "0.5808226", "0.5807682", "0.5800221", "0.57999426", "0.57984054", "0.5796783", "0.57961714", "0.57880193", "0.5782485", "0.57782453", "0.5777725" ]
0.59211177
70
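A minimal standalone check of the record above, assuming only numpy: the FWHM-to-sigma conversion sigma = FWHM / (2 * sqrt(2 * ln 2)), combined with the 1 / (sigma * sqrt(2 * pi)) amplitude, makes the profile integrate to one, which is what the query's "area=1" refers to. The grid limits and the FWHM value below are illustrative, not taken from the dataset.

import numpy as np

def unitary_Gauss(x, center, FWHM):  # as in the document field above
    sigma = np.abs(FWHM) / (2 * np.sqrt(2 * np.log(2)))
    Amp = 1.0 / (sigma * np.sqrt(2 * np.pi))
    return Amp * np.exp(-((x - center)**2) / (2 * sigma**2))

x = np.linspace(-10.0, 10.0, 100001)   # grid wide enough to capture the tails
y = unitary_Gauss(x, 0.0, 2.0)         # center 0, FWHM 2 (illustrative)
area = np.sum(y) * (x[1] - x[0])       # simple Riemann sum of the profile
print(area)                            # ~1.0, confirming unit area
assert abs(area - 1.0) < 1e-6
# the profile also drops to half its peak at center +/- FWHM/2:
print(unitary_Gauss(np.array([1.0]), 0.0, 2.0) / y.max())  # ~0.5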
IP convolution multiplication step for a single wavelength value
def fast_convolve(wav_val, R, wav_extended, flux_extended, FWHM_lim):
    FWHM = wav_val / R
    index_mask = (wav_extended > (wav_val - FWHM_lim * FWHM)) & (wav_extended < (wav_val + FWHM_lim * FWHM))
    flux_2convolve = flux_extended[index_mask]
    IP = unitary_Gauss(wav_extended[index_mask], wav_val, FWHM)
    sum_val = np.sum(IP * flux_2convolve)
    unitary_val = np.sum(IP * np.ones_like(flux_2convolve))  # effect of the convolution on a unitary spectrum; corrects for the changing number of points
    return sum_val / unitary_val
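A minimal usage sketch for the document field above, under stated assumptions: it assumes fast_convolve as just defined, redefines unitary_Gauss from the previous record so the snippet is self-contained, builds a synthetic spectrum, and applies the single-wavelength IP step across an output grid. The wavelength grid, R=50000, and FWHM_lim=5.0 are illustrative values, not taken from the dataset.

import numpy as np

def unitary_Gauss(x, center, FWHM):  # as in the Gaussian record above
    sigma = np.abs(FWHM) / (2 * np.sqrt(2 * np.log(2)))
    return np.exp(-((x - center)**2) / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))

# synthetic spectrum: flat continuum with one narrow absorption line at 2.1 um
wav_ext = np.linspace(2.09, 2.11, 20001)
flux_ext = 1.0 - 0.5 * np.exp(-((wav_ext - 2.1) / 1e-5)**2)

# evaluate the IP step wavelength by wavelength, keeping a margin at the grid
# edges so the +/- FWHM_lim*FWHM window never runs past the available data
wav_out = wav_ext[5000:15000]
flux_conv = np.array([fast_convolve(w, 50000, wav_ext, flux_ext, 5.0)
                      for w in wav_out])
print(flux_conv.min())  # line core is broadened and shallower than the input's 0.5 depth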
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n if self.signal_length is None:\n self.signal_length = x.shape[-1]\n self.channels = x.shape[-2]\n self._scales = self.compute_optimal_scales()\n self._kernel = self._build_wavelet_bank()\n\n if self._kernel.is_complex():\n self._kernel_real = self._kernel.real\n self._kernel_imag = self._kernel.imag\n\n x = x.unsqueeze(1)\n\n if self._kernel.is_complex():\n if (\n x.dtype != self._kernel_real.dtype\n or x.device != self._kernel_real.device\n ):\n self._kernel_real = self._kernel_real.to(device=x.device, dtype=x.dtype)\n self._kernel_imag = self._kernel_imag.to(device=x.device, dtype=x.dtype)\n\n output_real = nn.functional.conv2d(\n x, self._kernel_real, padding=1, stride=self.stride\n )\n output_imag = nn.functional.conv2d(\n x, self._kernel_imag, padding=1, stride=self.stride\n )\n output_real = torch.transpose(output_real, 1, 2)\n output_imag = torch.transpose(output_imag, 1, 2)\n\n if self.output_format == \"Magnitude\":\n return torch.sqrt(output_real ** 2 + output_imag ** 2)\n else:\n return torch.stack([output_real, output_imag], -1)\n\n else:\n if x.device != self._kernel.device:\n self._kernel = self._kernel.to(device=x.device, dtype=x.dtype)\n\n output = nn.functional.conv2d(\n x, self._kernel, padding=1, stride=self.stride\n )\n return torch.transpose(output, 1, 2)", "def loop_conv(X, W):\n # Go over all five dimensions \n # (#batches x #channels x #height x #width x #dur/length )\n # with filter that has\n # #filters x #channels x #height x #width x #dur/length \n num_filters = W.shape[0]\n filt_channels = W.shape[1]\n filt_height = W.shape[2]\n filt_width = W.shape[3]\n filt_duration = W.shape[4]\n num_batches = X.shape[0]\n input_channels = X.shape[1]\n assert(filt_channels == input_channels)\n out_shape = compute_out_shape(X.shape, W.shape)\n out_height = out_shape[2]\n out_width = out_shape[3]\n out_duration = out_shape[4]\n \n # The output is H :)\n H = np.zeros((out_shape))\n for batch_i in xrange(0, num_batches):\n for filt_i in xrange(0, num_filters):\n for out_x in xrange(0, out_height):\n for out_y in xrange(0, out_width):\n for out_z in xrange(0, out_duration):\n for chan_i in xrange(0, filt_channels):\n for filt_x in xrange(0, filt_height):\n for filt_y in xrange(0, filt_width):\n for filt_z in xrange(0, filt_duration):\n weight = W[filt_i, chan_i, filt_x, filt_y, filt_z]\n input_val = X[batch_i, chan_i, \\\n out_x + filt_x, out_y + filt_y, out_z + filt_z]\n H[batch_i, filt_i, out_x, out_y, out_z] += \\\n weight * input_val\n return H", "def convolution_nir(wav, flux, chip, R, FWHM_lim=5.0, plot=True):\n \n wav_chip, flux_chip = chip_selector(wav, flux, chip)\n #we need to calculate the FWHM at this value in order to set the starting point for the convolution\n \n FWHM_min = wav_chip[0]/R #FWHM at the extremes of vector\n FWHM_max = wav_chip[-1]/R \n \n #wide wavelength bin for the resolution_convolution\n wav_extended, flux_extended = fast_wav_selector(wav, flux, wav_chip[0]-FWHM_lim*FWHM_min, wav_chip[-1]+FWHM_lim*FWHM_max) \n print(\"wav_extended type\", type(wav_extended))\n wav_extended = np.array(wav_extended, dtype=\"float64\")\n print(\"wav_extended type after arrayed\", type(wav_extended)) # should be the same. 
\n flux_extended = np.array(flux_extended, dtype=\"float64\")\n \n print(\"Starting the Resolution convolution...\")\n # Predefine np array space\n flux_conv_res = np.empty_like(wav_chip, dtype=\"float64\")\n counter = 0 \n base_val = len(wav_chip)//20 # Adjust here to change % between reports\n \n for n, wav in enumerate(wav_chip):\n # put value directly into the array\n flux_conv_res[n] = fast_convolve(wav, R, wav_extended, flux_extended, FWHM_lim)\n if(n%base_val== 0):\n counter = counter+5\n print(\"Resolution Convolution at {}%%...\".format(counter))\n \n print(\"flux conv res type after loop\", type(flux_conv_res))\n flux_conv_res = np.array(flux_conv_res, dtype=\"float64\")\n print(\"flux conv res type after np.array\", type(flux_conv_res))\n \n print(\"Done.\\n\")\n \n if(plot):\n fig=plt.figure(1)\n plt.xlabel(r\"wavelength [ $\\mu$m ])\")\n plt.ylabel(r\"flux [counts] \")\n plt.plot(wav_chip, flux_chip/np.max(flux_chip), color ='k', linestyle=\"-\", label=\"Original spectra\")\n plt.plot(wav_chip, flux_conv_res/np.max(flux_conv_res), color ='b', linestyle=\"-\", label=\"Spectrum observed at and R=%d .\" % (R))\n plt.legend(loc='best')\n plt.show() \n return wav_chip, flux_conv_res", "def clConvolution(self, size, mask):", "def conv2d_forward(x, w, b, pad, stride):\n #raise NotImplementedError\n \n\n \n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n ba,h,wd,c=x.shape\n f,fh,fw,c=w.shape\n n_h=((h-fh+2*pad)//stride)+1\n n_w=((wd-fw+2*pad)//stride)+1\n x_paded=np.pad(x,pad,'constant')\n temp_dim=x_paded.shape[3]\n #print(temp_dim)\n out=np.zeros((ba,n_h,n_w,f))\n for m in range(0,ba):\n for i in range(0,n_h):\n for j in range(0,n_w):\n for n in range(0,f):\n h_t=i*stride\n h_t2=i*stride+fh\n w_t=j*stride\n w_t2=j*stride+fw\n temp=x_paded[pad+m,h_t:h_t2,w_t:w_t2,pad:temp_dim-pad] \n out[m,i,j,n]=np.sum(temp*w[n,:,:,:])+b[n]\n \n return out", "def miccai2018_net_2channel(vol_size, enc_nf, dec_nf, int_steps=7, use_miccai_int=False, indexing='ij', bidir=False, vel_resize=1/2): \n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. 
found: %d\" % ndims\n\n # get unet\n unet_model = unet_core_2channel(vol_size, enc_nf, dec_nf, full_size=False)\n [src1, tgt1, src2, tgt2] = unet_model.inputs\n [x1, x2, x_out] = unet_model.outputs[-1]\n\n # velocity mean and logsigma layers\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow_mean1 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow1')(x1)\n flow_mean2 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow2')(x2)\n flow_mean = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow')(x_out)\n # we're going to initialize the velocity variance very low, to start stable.\n flow_log_sigma1 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma1')(x1)\n flow_log_sigma2 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma2')(x2)\n flow_log_sigma = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma')(x_out)\n flow_params = concatenate([flow_mean, flow_log_sigma])\n\n # velocity sample\n flow1 = Sample(name=\"z_sample1\")([flow_mean1, flow_log_sigma1])\n flow2 = Sample(name=\"z_sample2\")([flow_mean2, flow_log_sigma2])\n flow = Sample(name=\"z_sample\")([flow_mean, flow_log_sigma])\n\n # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)\n if use_miccai_int:\n # for the miccai2018 submission, the squaring layer\n # scaling was essentially built in by the network\n # was manually composed of a Transform and and Add Layer.\n v = flow\n for _ in range(int_steps):\n v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])\n v = keras.layers.add([v, v1])\n flow = v\n\n else:\n # new implementation in neuron is cleaner.\n z_sample1 = flow1\n flow1 = nrn_layers.VecInt(method='ss', name='flow-int1', int_steps=int_steps)(z_sample1)\n z_sample2 = flow2\n flow2 = nrn_layers.VecInt(method='ss', name='flow-int2', int_steps=int_steps)(z_sample2)\n z_sample = flow\n flow = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=int_steps)(z_sample)\n if bidir:\n rev_z_sample1 = Negate()(z_sample1)\n neg_flow1 = nrn_layers.VecInt(method='ss', name='neg_flow-int1', int_steps=int_steps)(rev_z_sample1)\n rev_z_sample2 = Negate()(z_sample2)\n neg_flow2 = nrn_layers.VecInt(method='ss', name='neg_flow-int2', int_steps=int_steps)(rev_z_sample2)\n rev_z_sample = Negate()(z_sample)\n neg_flow = nrn_layers.VecInt(method='ss', name='neg_flow-int', int_steps=int_steps)(rev_z_sample)\n\n # get up to final resolution\n flow1 = trf_resize(flow1, vel_resize, name='diffflow1')\n flow2 = trf_resize(flow2, vel_resize, name='diffflow2')\n flow = trf_resize(flow, vel_resize, name='diffflow')\n\n if bidir:\n neg_flow1 = trf_resize(neg_flow1, vel_resize, name='neg_diffflow1')\n neg_flow2 = trf_resize(neg_flow2, vel_resize, name='neg_diffflow2')\n neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')\n\n # transform\n y_flow1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src1, flow1])\n y_flow2 = nrn_layers.SpatialTransformer(interp_method='linear', 
indexing=indexing)([src2, flow2])\n y1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src1, flow])\n y2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src2, flow])\n if bidir:\n y_tgt_flow1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgt1, neg_flow1])\n y_tgt_flow2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgt2, neg_flow2])\n y_tgt1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgt1, neg_flow])\n y_tgt2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgt2, neg_flow])\n\n # prepare outputs and losses\n outputs = [y1, y2, flow_params, y_flow1, y_flow2]\n if bidir:\n outputs = [y1, y_tgt1,y2, y_tgt2, flow_params, y_flow1,y_tgt_flow1, y_flow2,y_tgt_flow2]\n\n # build the model\n return Model(inputs=[src1, tgt1, src2, tgt2], outputs=outputs)", "def test_positional_convolution_forward(ctx):\n # num_batch * channel * height * width input\n # i.e. (2, 2, 6, 6)\n in_data = \\\n mx.nd.array(\n [\n [[[1, 2, -1, 0, 1, 1],\n [3, 6, -5, 4, 2, -2],\n [9, 6, -1, 3, 1, 3],\n [4, 2, 5, 7, 3, 1],\n [0, 1, 1, 2, 2, 1],\n [3, 1, 2, 4, 3, 3]],\n\n [[3, 1, 2, 4, 3, 3],\n [0, 1, 1, 2, 2, 1],\n [4, 2, 5, 7, 3, 1],\n [9, 6, -1, 3, 1, 3],\n [3, 6, -5, 4, 2, -2],\n [1, 2, -1, 0, 1, 1]]],\n [[[1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1],\n [0, 0, 1, 1, 2, 2],\n [3, 3, 0, -1, -1, -2],\n [3, 1, 0, 3, 3, 2],\n [5, 6, 7, -1, -2, 0]],\n\n [[5, 6, 7, -1, -2, 0],\n [3, 1, 0, 3, 3, 2],\n [3, 3, 0, -1, -1, -2],\n [0, 0, 1, 1, 2, 2],\n [6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6]]]\n ], ctx=ctx)\n\n # num_filter * channel * K * K weight\n # i.e. (2, 2, 3, 3)\n weight = \\\n mx.nd.array(\n [\n [[[1, 0, 1],\n [0, 2, -1],\n [2, 3, 1]],\n\n [[1, 1, 0],\n [2, -1, 2],\n [3, -2, 4]]],\n\n [[[0, 1, 2],\n [-1, 2, 3],\n [4, 1, -5]],\n\n [[3, 0, -1],\n [-1, 2, 1],\n [5, 6, 2]]]\n ], ctx=ctx)\n\n # num_batch * channel * out_height * out_width scale\n # i.e. (2, 2, 6, 6)\n scale = \\\n mx.nd.array(\n [\n [[[1, 1, 1, 1, 1, 1],\n [1, -1, 1, -1, 1, -1],\n [-1, 1, -1, 1, -1, 1],\n [-1, -1, -1, -1, -1, -1],\n [2, 1, 2, 2, 1, 1],\n [1, 2, 1, 2, 1, 2]],\n\n [[1, 1, 1, 1, 1, 1],\n [1, -1, -1, 1, 1, 1],\n [-1, 1, -1, 1, -1, 1],\n [1, -1, -1, -1, -1, 1],\n [2, -1, 2, -2, 1, 1],\n [1, 2, 1, 2, 1, 2]]],\n\n [[[6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6],\n [1, -1, 2, -2, 3, -3],\n [4, -4, 5, -5, 6, -6],\n [1, 1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1, -1]],\n\n [[-1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, 1],\n [4, -4, 5, -5, 6, -6],\n [1, -1, 2, -2, 3, -3],\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1]]],\n ], ctx=ctx)\n\n # num_filter bias\n # i.e. 
(2, )\n bias = \\\n mx.nd.array(\n [1, 2], ctx=ctx)\n\n in_data_var = mx.symbol.Variable(name=\"in_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n scale_var = mx.symbol.Variable(name=\"scale\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n\n op = mx.symbol.contrib.PositionalConvolution(name='test_positional_convolution',\n data=in_data_var,\n scale=scale_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=2,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n be = op.bind(ctx=ctx, args={'in_data': in_data,\n 'scale': scale,\n 'weight': weight,\n 'bias': bias})\n be.forward(True)\n out_o = be.outputs[0].asnumpy()\n print(out_o)", "def forward(self, xs, ilens, masks):\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs, _ = self.encoders(xs, masks)\n if self.normalize_before:\n xs = self.after_norm(xs)\n hlens = [xs.size(1) for i in range(xs.size(0))]\n return xs, hlens", "def _process(self, X):\n # 周波数毎に実施する\n ones = np.ones(self.L.shape[1])\n\n # 初期のポジションベクトル\n n_channels = np.shape(X)[0]\n n_freq_bins = np.shape(X)[1]\n n_frames = np.shape(X)[2]\n\n d = None\n n_mic_pair = 0\n # for m1 in range(1):\n\n step = 2\n\n mic_pairs = self.mic_pairs\n # mic_pairs=[[m1,m2] for m1 in range(n_channels-1) for m2 in range(m1+1,np.minimum(m1+step+1,n_channels)) ]\n mic_pairs = np.array(mic_pairs)\n\n n_mic_pair = np.shape(mic_pairs)[0]\n d = np.array(self.mic_positions[mic_pairs[:, 1]]) - np.array(\n self.mic_positions[mic_pairs[:, 0]]\n )\n # d: n_mic_pair,dim\n\n # for the linear surrogate function, we need the smallest eigenvalue\n # of the covariance matrix of the microphone pairs\n if self.mm_type == SurrogateType.Linear:\n mic_diff_cov = d.T @ d\n mic_diff_cov_ev_max = np.linalg.eigvalsh(mic_diff_cov)[-1]\n else:\n mic_diff_cov_ev_max = None\n\n # 時間周波数毎の初期のポジションベクトル\n position_vector = np.zeros(shape=(n_freq_bins, n_frames, self.dim))\n\n X_temp = X[:, self.freq_bins, :]\n\n sigma = np.angle(X_temp[mic_pairs[:, 1], ...] 
/ X_temp[mic_pairs[:, 0], ...])\n sigma = np.transpose(sigma, (1, 2, 0))\n\n sigma = np.where(np.abs(sigma) < 1.0e-18, np.zeros_like(sigma) + 1.0e-18, sigma)\n z = np.zeros(shape=(n_freq_bins, n_frames, n_mic_pair), dtype=np.int)\n x = np.random.normal(size=n_freq_bins * n_frames * n_mic_pair)\n x = np.reshape(x, newshape=(n_freq_bins, n_frames, n_mic_pair))\n # 初期化\n mode_vec = self.rough_mode_vec[self.freq_bins, :, :]\n mode_vec = np.conjugate(mode_vec)\n\n # Evaluation of the cost function on rough grid\n XX = X[:, self.freq_bins, :].transpose([1, 2, 0]) # (freq, time, chan)\n mv = mode_vec.transpose([0, 2, 1]) # (freq, grid, chan)\n prod = (mv[:, None, :, :] @ XX[:, :, :, None])[..., 0]\n\n amp = np.abs(prod)\n # ft\n index = np.argmax(amp, axis=-1)\n org_shape = np.shape(index)\n index = np.reshape(index, [-1])\n\n # indexに相当する方向を取る\n if self.dim == 2:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n # ダミー\n rough_colatitude_recon = np.zeros_like(rough_azimuth_recon) + np.pi\n elif self.dim == 3:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n rough_colatitude_recon = self.rough_grid.colatitude[index]\n\n doas = np.concatenate(\n (\n rough_colatitude_recon[:, None], # colatitude [0, pi]\n rough_azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n\n # source_locations: 3, n_frames\n source_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n source_locations = np.reshape(source_locations, (3, org_shape[0], org_shape[1]))\n\n position_vector[self.freq_bins, :, :] = np.transpose(\n source_locations[: self.dim, :, :], (1, 2, 0)\n )\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n use_clustering = False\n cluster_index = np.random.randint(0, self.num_src, size=n_freq_bins * n_frames)\n cluster_index = np.reshape(cluster_index, (n_freq_bins, n_frames))\n cluster_center = np.random.normal(size=self.num_src * self.dim)\n cluster_center = np.reshape(cluster_center, newshape=(self.num_src, self.dim))\n size = np.einsum(\"ci,ci->c\", np.conjugate(cluster_center), cluster_center)\n size = np.sqrt(size)[..., np.newaxis]\n cluster_center = cluster_center / np.maximum(size, 1.0e-18)\n if use_clustering == True:\n # pを作る\n for k in self.freq_bins:\n for l in range(n_frames):\n position_vector[k, l, :] = cluster_center[cluster_index[k, l], :]\n\n est_p = position_vector[self.freq_bins, ...]\n z = z[self.freq_bins, ...]\n x = x[self.freq_bins, ...]\n freqs = self.freq_hz\n cluster_index = cluster_index[self.freq_bins, ...]\n\n silent_mode = True\n freqs_d = np.einsum(\"f,pi->fpi\", freqs, d)\n for i in range(self.n_mm_itertaions):\n #\n (\n org_cost_0,\n org_cost_1,\n org_cost_2,\n org_cost_3,\n cost_0,\n cost_1,\n cost_2,\n cost_3,\n est_p,\n z,\n x,\n ) = doa_estimation_one_iteration(\n freqs_d,\n est_p,\n sigma,\n z,\n x,\n cluster_index=cluster_index,\n cluster_center=cluster_center,\n iter_num2=self.rooting_n_iter,\n silent_mode=silent_mode,\n surrogate=self.mm_type,\n mic_diff_cov_ev_max=mic_diff_cov_ev_max,\n freqs=freqs,\n mic_diff=d,\n )\n if silent_mode == False:\n print(\"Cost function:\", org_cost_0)\n # est_pから\n # fti\n position_vector[self.freq_bins, ...] 
= est_p\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n # gridを探す\n\n # position_vectorに相当する方向を取る\n if self.dim == 2:\n azimuth_recon = self.grid.azimuth\n # ダミー\n colatitude_recon = np.zeros_like(azimuth_recon) + np.pi\n elif self.dim == 3:\n azimuth_recon = self.grid.azimuth\n colatitude_recon = self.grid.colatitude\n\n doas = np.concatenate(\n (\n colatitude_recon[:, None], # colatitude [0, pi]\n azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n # source_locations: 3, n_grid_num\n grid_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n size = np.einsum(\"in,in->n\", np.conjugate(grid_locations), grid_locations)\n size = np.sqrt(size)[np.newaxis, ...]\n grid_locations = grid_locations / np.maximum(size, 1.0e-18)\n\n if not self.use_kd_tree:\n grid_index_buf = []\n for k in self.freq_bins:\n prod = np.einsum(\"in,ti->tn\", grid_locations, position_vector[k, ...])\n grid_index = np.argmax(prod, axis=-1)\n grid_index_buf.append(grid_index)\n grid_index_buf = np.array(grid_index_buf)\n\n spire_cost = np.zeros(self.grid.n_points)\n for n in range(self.grid.n_points):\n spire_cost[n] = spire_cost[n] + np.count_nonzero(grid_index_buf == n)\n\n else:\n\n # Same code, but with a kd-tree (Robin version)\n dim = position_vector.shape[-1]\n pv = position_vector[self.freq_bins, ...].reshape((-1, dim))\n _, nn = self.tree.query(pv)\n bin_indices, bin_count = np.unique(nn, return_counts=True)\n\n spire_cost = np.zeros(self.grid.n_points, dtype=np.float)\n spire_cost[bin_indices] = bin_count\n\n self.grid.set_values(spire_cost)", "def wire(self):\n self.J[self.size[0]//2, self.size[1]//2, :] = 1.0 / self.size[2]", "def forward(self, x):\n x1 = x[:, 0, :, :].reshape((-1, 1, obs_size * 2 + 1, obs_size * 2 + 1))\n x2 = x[:, 1, :, :].reshape((-1, (obs_size * 2 + 1) ** 2))\n if x2.shape[0] == 1:\n x2 = np.tile(x2, (minibatch_size, 1))\n h = F.relu(self.bn1(self.conv1(x)))\n h = F.relu(self.bn2(self.conv2(x)))\n h = F.relu(self.bn3(self.conv3(x)))\n h = self.l(h)\n return DiscreteActionValue(h)", "def miccai2018_net(vol_size, enc_nf, dec_nf, int_steps=7, use_miccai_int=False, indexing='ij', bidir=False, vel_resize=1/2): \n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n\n # get unet\n unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=False)\n [src, tgt] = unet_model.inputs\n x_out = unet_model.outputs[-1]\n\n # velocity mean and logsigma layers\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow_mean = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow')(x_out)\n # we're going to initialize the velocity variance very low, to start stable.\n flow_log_sigma = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma')(x_out)\n flow_params = concatenate([flow_mean, flow_log_sigma])\n\n # velocity sample\n flow = Sample(name=\"z_sample\")([flow_mean, flow_log_sigma])\n\n # integrate if diffeomorphic (i.e. 
treating 'flow' above as stationary velocity field)\n if use_miccai_int:\n # for the miccai2018 submission, the squaring-layer scaling was essentially\n # built in by the network and was manually composed of a Transform and an Add layer.\n v = flow\n for _ in range(int_steps):\n v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])\n v = keras.layers.add([v, v1])\n flow = v\n\n else:\n # new implementation in neuron is cleaner.\n z_sample = flow\n flow = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=int_steps)(z_sample)\n if bidir:\n rev_z_sample = Negate()(z_sample)\n neg_flow = nrn_layers.VecInt(method='ss', name='neg_flow-int', int_steps=int_steps)(rev_z_sample)\n\n # get up to final resolution\n flow = trf_resize(flow, vel_resize, name='diffflow0')\n\n if bidir:\n neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')\n\n # transform\n y = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing, name='transform')([src, flow])\n if bidir:\n y_tgt = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgt, neg_flow])\n\n # prepare outputs and losses\n outputs = [y, flow_params]\n if bidir:\n outputs = [y, y_tgt, flow_params]\n\n # build the model\n return Model(inputs=[src, tgt], outputs=outputs)", "def convolution_as_multiplication(I, F, print_ir=False):\n # number of rows and columns of the input \n I_row_num, I_col_num = I.shape \n\n # number of rows and columns of the filter\n F_row_num, F_col_num = F.shape\n\n # calculate the output dimensions\n output_row_num = I_row_num + F_row_num - 1\n output_col_num = I_col_num + F_col_num - 1\n if print_ir: print('output dimension:', output_row_num, output_col_num)\n\n # zero pad the filter\n F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),\n (0, output_col_num - F_col_num)),\n 'constant', constant_values=0)\n if print_ir: print('F_zero_padded: ', F_zero_padded)\n\n # use each row of the zero-padded F to create a Toeplitz matrix. 
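\n # (added illustration, not from the original code: for a padded filter row c = [1, 2, 0]\n # over an input with 3 columns, toeplitz(c, np.r_[c[0], np.zeros(2)]) gives\n # [[1, 0, 0], [2, 1, 0], [0, 2, 1]], i.e. each column of the block slides the\n # filter taps over one input column)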
\n # The number of columns in these matrices is the same as the number of columns of the input signal\n toeplitz_list = []\n for i in range(F_zero_padded.shape[0]-1, -1, -1): # iterate from last row to the first row\n c = F_zero_padded[i, :] # i-th row of F \n r = np.r_[c[0], np.zeros(I_col_num-1)] # the first row for the toeplitz function should be defined, otherwise\n # the result is wrong\n toeplitz_m = toeplitz(c,r) # this function is in the scipy.linalg library\n toeplitz_list.append(toeplitz_m)\n if print_ir: print('F '+ str(i)+'\\n', toeplitz_m)\n\n # doubly blocked toeplitz indices: \n # this matrix defines which toeplitz matrix from toeplitz_list goes to which part of the doubly blocked matrix\n c = range(1, F_zero_padded.shape[0]+1)\n r = np.r_[c[0], np.zeros(I_row_num-1, dtype=int)]\n doubly_indices = toeplitz(c, r)\n if print_ir: print('doubly indices \\n', doubly_indices)\n\n ## create the doubly blocked matrix with zero values\n toeplitz_shape = toeplitz_list[0].shape # shape of one toeplitz matrix\n h = toeplitz_shape[0]*doubly_indices.shape[0]\n w = toeplitz_shape[1]*doubly_indices.shape[1]\n doubly_blocked_shape = [h, w]\n doubly_blocked = np.zeros(doubly_blocked_shape)\n\n # tile toeplitz matrices for each row in the doubly blocked matrix\n b_h, b_w = toeplitz_shape # height and width of each block\n for i in range(doubly_indices.shape[0]):\n for j in range(doubly_indices.shape[1]):\n start_i = i * b_h\n start_j = j * b_w\n end_i = start_i + b_h\n end_j = start_j + b_w\n doubly_blocked[start_i: end_i, start_j:end_j] = toeplitz_list[doubly_indices[i,j]-1]\n\n if print_ir: print('doubly_blocked: ', doubly_blocked)\n\n # convert I to a vector\n vectorized_I = matrix_to_vector(I)\n if print_ir: print('vectorized_I: ', vectorized_I)\n \n # get the result of the convolution by matrix multiplication\n result_vector = np.matmul(doubly_blocked, vectorized_I)\n if print_ir: print('result_vector: ', result_vector)\n\n # reshape the raw result to the desired matrix form\n out_shape = [output_row_num, output_col_num]\n output = vector_to_matrix(result_vector, out_shape)\n if print_ir: print('Result of implemented method: \\n', output)\n \n return output", "def input_wh_mul() -> int:\n max_pool_layers = 5\n return 2 ** max_pool_layers", "def Ip(self, freq):", "def app(data_pupil,data_phase,oversize=4):\n complexr=app_complex(data_pupil,data_phase,oversize)\n amp=(abs(complexr)**2)\n return amp", "def forward(self, image):\n height, width = image.shape\n H_out, W_out = output_shape(height, width, self.filter_size, self.padding, self.stride)\n output = np.zeros((H_out, W_out, self.num_filters))\n padded_image = pad_2d(image, self.padding)\n for patch, i, j in self.image_patch(padded_image):\n output[i,j] = np.sum(patch*self.conv_filter, axis=(1,2))\n return output", "def _process(self, X):\n # carried out for each frequency bin\n ones = np.ones(self.L.shape[1])\n\n spire_cost = np.zeros(self.grid.n_points)\n\n # initial position vectors\n n_channels = np.shape(X)[0]\n n_freq_bins = np.shape(X)[1]\n n_frames = np.shape(X)[2]\n\n d = None\n n_mic_pair = 0\n # for m1 in range(1):\n\n step = 2\n\n mic_pairs = self.mic_pairs\n # mic_pairs=[[m1,m2] for m1 in range(n_channels-1) for m2 in range(m1+1,np.minimum(m1+step+1,n_channels)) ]\n mic_pairs = np.array(mic_pairs)\n\n n_mic_pair = np.shape(mic_pairs)[0]\n d = np.array(self.mic_positions[mic_pairs[:, 1]]) - np.array(\n self.mic_positions[mic_pairs[:, 0]]\n )\n # d: n_mic_pair,dim\n\n # initial position vector for each time-frequency point\n position_vector = np.zeros(shape=(n_freq_bins, n_frames, self.dim))\n\n X_temp = X[:, self.freq_bins, :]\n\n sigma 
= np.angle(X_temp[mic_pairs[:, 1], ...] / X_temp[mic_pairs[:, 0], ...])\n sigma = np.transpose(sigma, (1, 2, 0))\n\n sigma = np.where(np.abs(sigma) < 1.0e-18, np.zeros_like(sigma) + 1.0e-18, sigma)\n z = np.zeros(shape=(n_freq_bins, n_frames, n_mic_pair), dtype=np.int)\n x = np.random.normal(size=n_freq_bins * n_frames * n_mic_pair)\n x = np.reshape(x, newshape=(n_freq_bins, n_frames, n_mic_pair))\n # 初期化\n mode_vec = self.rough_mode_vec[self.freq_bins, :, :]\n mode_vec = np.conjugate(mode_vec)\n prod = np.einsum(\"fmi,mft->fti\", mode_vec, X[:, self.freq_bins, :])\n # prod=np.einsum(\"mi,mt->ti\",mode_vec,X[:,k,:])\n amp = np.abs(prod)\n # ft\n index = np.argmax(amp, axis=-1)\n org_shape = np.shape(index)\n index = np.reshape(index, [-1])\n\n # indexに相当する方向を取る\n if self.dim == 2:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n # ダミー\n rough_colatitude_recon = np.zeros_like(rough_azimuth_recon) + np.pi\n elif self.dim == 3:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n rough_colatitude_recon = self.rough_grid.colatitude[index]\n\n doas = np.concatenate(\n (\n rough_colatitude_recon[:, None], # colatitude [0, pi]\n rough_azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n\n # source_locations: 3, n_frames\n source_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n source_locations = np.reshape(source_locations, (3, org_shape[0], org_shape[1]))\n\n position_vector[self.freq_bins, :, :] = np.transpose(\n source_locations[: self.dim, :, :], (1, 2, 0)\n )\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n est_p = position_vector[self.freq_bins, ...]\n z = z[self.freq_bins, ...]\n x = x[self.freq_bins, ...]\n freqs = self.freq_hz\n cluster_index = cluster_index[self.freq_bins, ...]\n\n silent_mode = True\n freqs_d = np.einsum(\"f,pi->fpi\", freqs, d)\n x_non_const_power_vector = np.zeros(shape=(n_freq_bins, n_frames))\n\n for i in range(self.n_mm_itertaions):\n (\n org_cost_0,\n org_cost_1,\n org_cost_2,\n org_cost_3,\n cost_0,\n cost_1,\n cost_2,\n cost_3,\n est_p,\n z,\n x,\n x_non_const_power,\n ) = coplaner_doa_estimation_one_iteration(\n freqs_d,\n est_p,\n sigma,\n z,\n x,\n use_clustering=use_clustering,\n cluster_index=cluster_index,\n cluster_center=cluster_center,\n iter_num2=self.rooting_n_iter,\n silent_mode=silent_mode,\n zero_feature_index=2,\n )\n\n if silent_mode == False:\n print(cost_0, cost_1, cost_2, cost_3)\n\n # est_pから\n # fti\n position_vector[self.freq_bins, ...] 
= est_p\n\n x_non_const_power_vector[self.freq_bins, :] = x_non_const_power[:, :, 0]\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n # gridを探す\n\n # position_vectorに相当する方向を取る\n if self.dim == 2:\n azimuth_recon = self.grid.azimuth\n # ダミー\n colatitude_recon = np.zeros_like(azimuth_recon) + np.pi\n elif self.dim == 3:\n azimuth_recon = self.grid.azimuth\n colatitude_recon = self.grid.colatitude\n\n doas = np.concatenate(\n (\n colatitude_recon[:, None], # colatitude [0, pi]\n azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n # source_locations: 3, n_grid_num\n grid_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n size = np.einsum(\"in,in->n\", np.conjugate(grid_locations), grid_locations)\n size = np.sqrt(size)[np.newaxis, ...]\n grid_locations = grid_locations / np.maximum(size, 1.0e-18)\n\n grid_index_buf = []\n\n # 制約なし解のパワーが1を大幅に超えて居たらReject\n print(np.average(x_non_const_power_vector))\n valid_index = x_non_const_power_vector < self.reject_th\n for k in self.freq_bins:\n prod = np.einsum(\"in,ti->tn\", grid_locations, position_vector[k, ...])\n grid_index = np.argmax(prod, axis=-1)\n\n grid_index = grid_index[valid_index[k, :]]\n\n grid_index_buf.append(grid_index)\n grid_index_buf = np.array(grid_index_buf)\n\n for n in range(self.grid.n_points):\n spire_cost[n] = spire_cost[n] + np.count_nonzero(grid_index_buf == n)\n\n self.grid.set_values(spire_cost)", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def forward(self, x):\n x = self.pad_tensor(x)\n if self.network_controller.is_float_coefficient:\n return self.bn(self.conv(x))\n else:\n res = F.conv2d(x, self.q(self.conv.weight), self.conv.bias, self.stride,\n self.padding_conv, self.dilation, self.group)\n return self.bn(res)", "def forward(self, x, freq=None, shift=None):\n\n # if freq != None and shift != None:\n # x = self.encoding(x, freq, shift)\n # else:\n # x = self.encoding(x)\n out = self.net(x)\n rgb = torch.sigmoid(out[..., :-1])\n sigma = F.softplus(out[..., -1])\n return rgb, sigma", "def waverec2(coeffs: list, wavelet: pywt.Wavelet) -> torch.Tensor:\n _, _, rec_lo, rec_hi = get_filter_tensors(\n wavelet, flip=False, device=coeffs[0].device,\n dtype=coeffs[0].dtype\n )\n filt_len = rec_lo.shape[-1]\n rec_filt = construct_2d_filt(lo=rec_lo, hi=rec_hi)\n\n res_ll = coeffs[0]\n for c_pos, res_lh_hl_hh in enumerate(coeffs[1:]):\n res_ll = torch.cat(\n [res_ll, res_lh_hl_hh[0], res_lh_hl_hh[1], res_lh_hl_hh[2]], 1\n )\n res_ll = torch.nn.functional.conv_transpose2d(\n res_ll, rec_filt, stride=2)\n\n # remove the padding\n padl = (2 * filt_len - 3) // 2\n padr = (2 * filt_len - 3) // 2\n padt = (2 * filt_len - 3) // 2\n padb = (2 * filt_len - 3) // 2\n if c_pos < len(coeffs) - 2:\n # if 1:\n pred_len = res_ll.shape[-1] - (padl + padr)\n next_len = coeffs[c_pos + 2][0].shape[-1]\n pred_len2 = res_ll.shape[-2] - (padt + padb)\n next_len2 = coeffs[c_pos + 2][0].shape[-2]\n if next_len != pred_len:\n padr += 1\n pred_len = res_ll.shape[-1] - (padl + padr)\n assert (\n next_len == pred_len\n ), \"padding error, please open an issue on github \"\n if next_len2 != pred_len2:\n padb += 1\n pred_len2 = res_ll.shape[-2] - (padt + padb)\n assert (\n next_len2 == pred_len2\n ), \"padding error, please open an issue on github \"\n if padt > 0:\n res_ll = res_ll[..., padt:, 
:]\n if padb > 0:\n res_ll = res_ll[..., :-padb, :]\n if padl > 0:\n res_ll = res_ll[..., padl:]\n if padr > 0:\n res_ll = res_ll[..., :-padr]\n return res_ll", "def miccai2018_resnet(vol_size, enc_nf, dec_nf, int_steps=7, use_miccai_int=False, indexing='ij', bidir=False, vel_resize=1/2): \n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n\n # get unet\n unet_model = res_unet_core(vol_size, enc_nf, dec_nf, full_size=False)\n [src, tgt] = unet_model.inputs\n x_out = unet_model.outputs[-1]\n\n # velocity mean and logsigma layers\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow_mean = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow')(x_out)\n # we're going to initialize the velocity variance very low, to start stable.\n flow_log_sigma = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma')(x_out)\n flow_params = concatenate([flow_mean, flow_log_sigma])\n\n # velocity sample\n flow = Sample(name=\"z_sample\")([flow_mean, flow_log_sigma])\n\n # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)\n if use_miccai_int:\n # for the miccai2018 submission, the squaring layer\n # scaling was essentially built in by the network\n # was manually composed of a Transform and and Add Layer.\n v = flow\n for _ in range(int_steps):\n v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])\n v = keras.layers.add([v, v1])\n flow = v\n\n else:\n # new implementation in neuron is cleaner.\n z_sample = flow\n flow = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=int_steps)(z_sample)\n if bidir:\n rev_z_sample = Negate()(z_sample)\n neg_flow = nrn_layers.VecInt(method='ss', name='neg_flow-int', int_steps=int_steps)(rev_z_sample)\n\n # get up to final resolution\n flow = trf_resize(flow, vel_resize, name='diffflow')\n\n if bidir:\n neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')\n\n # transform\n y = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])\n if bidir:\n y_tgt = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgt, neg_flow])\n\n # prepare outputs and losses\n outputs = [y, flow_params]\n if bidir:\n outputs = [y, y_tgt, flow_params]\n\n # build the model\n return Model(inputs=[src, tgt], outputs=outputs)", "def forward(self, x):\n x = x.view(1, 1, *x.size())\n return self.conv(x, weight=self.weight, padding=self.padding).squeeze()", "def deconvolution(obs, green, lambd):\n\n nr, nt = obs.shape\n num = np.zeros(nt)\n den = np.zeros(nt)\n\n for ir in range(len(obs)):\n\n OBS = fft(obs[ir, :])\n GRE = fft(green[ir, :])\n\n # Sum all\n num = num + np.conj(GRE) * OBS\n den = den + np.conj(GRE) * GRE\n\n # Get maximum value of denominator\n maxden = np.max(np.abs(den))\n\n # Waterlevel\n wl = lambd * maxden\n\n # Deconvolution using the waterlevel\n src = np.real(ifft(num / (den+wl).T))\n\n # Compute fit to original data\n res = obs\n chi0 = 0.5 * np.sum(np.sum(res ** 2))\n\n syn = compute_synth(green, src)\n res = obs - syn\n chi = 0.5 * np.sum(np.sum(res ** 2))\n\n print(chi/chi0)\n\n return src, syn", "def signalTransform(dat):\n return numpy.convolve(dat, slopWindow, \"same\")", "def morl_conv(image, subsample = 1, J = 0, theta = 0, sigma = 0.85, xi = 3 * np.pi / 4):\r\n morlet_filter = 
Morlet2D_grid(image.shape[0], image.shape[1], J, theta, sigma, xi)\r\n return (signal.fftconvolve(image, morlet_filter, mode = \"same\")[ :: subsample, ::subsample])", "def multiply(self, layer):\n pass", "def Calc(c, Filter):\r\n scalar= 0 # the scalar multiplication\r\n for x in range (3):\r\n lineC = c[x:x+1][0] # a line from the slice\r\n lineF= Filter[x:x+1][0] # a line fron the filter\r\n scalar= scalar + np.dot(lineC, lineF) # scalar multiplication of two lines- one from each matrix \r\n return scalar", "def forward(self, x):\n\n out = F.relu(self.conv1(x))\n out = F.relu(self.conv2(out))\n\n out = F.relu(self.resnet_block(out))\n\n # 8 x 8 x 64\n noise = self.sample_noise((out.shape[0], self.noise_dim, out.shape[2], out.shape[3]))\n\n # print(noise.shape)\n # print(out.shape)\n\n out = torch.cat([out, noise], dim=1)\n\n # print(out.shape)\n\n out = F.relu(self.deconv1(out))\n out = F.tanh(self.deconv2(out))\n\n return out", "def _irfft2d(f_x) :", "def conv_forward(x, w, b, conv_param):\n stride = conv_param['stride']\n pad = conv_param['pad']\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_out = 1 + (H + 2 * pad - HH) / stride\n W_out = 1 + (H + 2 * pad - WW) / stride\n H_out = int(H_out)\n W_out = int(W_out)\n\n out = np.zeros((N, F, H_out, W_out))\n for n in range(N):\n conv_in = np.pad(x[n], ((0, 0), (pad, pad), (pad, pad)), mode='constant')\n for f in range(F):\n conv_w = w[f]\n conv_b = b[f]\n for i in range(H_out):\n for j in range(W_out):\n conv_i = i * stride\n conv_j = j * stride\n conv_area = conv_in[:, conv_i : conv_i + HH, conv_j : conv_j + WW]\n out[n, f, i, j] = np.sum(conv_area * conv_w) + conv_b\n\n cache = (x, w, b, conv_param)\n return out, cache", "def coadd(self, sp, method='pixel'):\n\t\tif method == 'pixel':\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tif self.apply_sigma_mask:\n\t\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\telse:\n\t\t\t\tself.mask = []\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\telif method == 'wavelength':\n\t\t\tself_supers = copy.deepcopy(self)\n\t\t\tg = interpolate.interp1d(self.wave, self.flux)\n\t\t\tsp_supers = copy.deepcopy(sp)\n\t\t\tf = interpolate.interp1d(sp.wave, sp.flux)\n\t\t\t## 10x supersample the average difference of \n\t\t\t## the wavelength\n\t\t\t#step0 = np.mean(np.diff(self.wave))/10\n\t\t\t#self_supers.wave = np.arange(self.wave[0],\n\t\t\t#\tself.wave[-1],step0)\n\t\t\tself_supers.flux = g(self_supers.wave)\n\t\t\tself_supers.oriWave = np.arange(self.oriWave[0],\n\t\t\t\tself.oriWave[-1],(self.oriWave[-1]-self.oriWave[0])/10240)\n\t\t\tg1 = interpolate.interp1d(self.oriWave, self.oriFlux)\n\t\t\tself_supers.oriFlux = g1(self_supers.oriWave)\n\n\t\t\t#step = np.mean(np.diff(sp.wave))/10\n\t\t\t#sp_supers.wave = np.arange(sp.wave[0],sp.wave[-1],step)\n\t\t\t#sp_supers.flux = f(sp_supers.wave)\n\t\t\tsp_supers.oriWave = np.arange(sp.oriWave[0],\n\t\t\t\tsp.oriWave[-1],(sp.oriWave[-1]-sp.oriWave[0])/10240)\n\t\t\tf1 = interpolate.interp1d(sp.oriWave, 
sp.oriFlux)\n\t\t\tsp_supers.oriFlux = f1(sp_supers.oriWave)\n\n\t\t\t## calculate the max cross correlation value\n\t\t\tdef xcorr(a0,b0,shift):\n\t\t\t\t\"\"\"\n\t\t\t\tShift is the index number after supersampling \n\t\t\t\tboth of the spectra.\n\t\t\t\t\"\"\"\n\t\t\t\ta = copy.deepcopy(a0)\n\t\t\t\tb = copy.deepcopy(b0)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\tlength = b.oriFlux.shape[0]\n\t\t\t\tif shift >= 0:\n\t\t\t\t\tmask_a = np.arange(0,shift,1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(length-1,length-shift-1,-1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\telif shift < 0:\n\t\t\t\t\tmask_a = np.arange(length-1,length+shift-1,-1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(0,-shift,1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\t#b.wave += shift * step\n\t\t\t\t## discard the points where the wavelength values\n\t\t\t\t## are larger\n\t\t\t\t#condition = (a.wave > b.wave[0]) & (a.wave < b.wave[-1])\n\t\t\t\t\n\t\t\t\t#a.flux = a.flux[np.where(condition)]\n\t\t\t\t#a.wave = a.wave[np.where(condition)]\n\t\t\t\t## resampling the telluric model\n\t\t\t\t#b.flux = np.array(smart.integralResample(xh=b.wave, \n\t\t\t\t#\tyh=b.flux, xl=a.wave))\n\t\t\t\t\n\t\t\t\treturn np.inner(a.oriFlux, b.oriFlux)/\\\n\t\t\t\t(np.average(a.oriFlux)*np.average(b.oriFlux))/a.oriFlux.shape[0]\n\n\t\t\txcorr_list = []\n\t\t\t## mask the ending pixels\n\t\t\tself_supers2 = copy.deepcopy(self_supers)\n\t\t\tsp_supers2 = copy.deepcopy(sp_supers)\n\t\t\tself_supers2.wave = self_supers2.wave[1000:-1000]\n\t\t\tself_supers2.flux = self_supers2.flux[1000:-1000]\n\t\t\tsp_supers2.wave = sp_supers2.wave[1000:-1000]\n\t\t\tsp_supers2.flux = sp_supers2.flux[1000:-1000]\n\t\t\tfor shift in np.arange(-10,10,1):\n\t\t\t\txcorr_list.append(xcorr(self_supers2,sp_supers2,shift))\n\n\t\t\t## dignostic plot for cc result\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(np.arange(-10,10,1),np.array(xcorr_list),'k-')\n\t\t\tplt.show()\n\t\t\tplt.close()\n\n\t\t\tstep = np.absolute(np.mean(np.diff(sp_supers.wave)))\n\t\t\tbestshift = np.arange(-10*step,10*step,step)[np.argmax(xcorr_list)]\n\t\t\tsp_supers.oriWave += bestshift\n\t\t\t## discard the points where the wavelength values\n\t\t\t## are larger\n\t\t\tcondition = (self.oriWave > sp_supers.oriWave[0])\\\n\t\t\t& (self.oriWave < sp_supers.oriWave[-1])\n\n\t\t\tself.oriFlux = self.oriFlux[np.where(condition)]\n\t\t\tself.oriWave = self.oriWave[np.where(condition)]\n\t\t\tself.oriNoise = self.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriNoise = sp_supers.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriFlux = np.array(smart.integralResample(xh=sp_supers.oriWave, \n\t\t\t\tyh=sp_supers.oriFlux, xl=self.oriWave))\n\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp_supers.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp_supers.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn 
self", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def forward(self, stride, padding, *args):\n #TODO\n parents = list(args)\n inp_ = parents[0].value\n kernel = parents[1].value\n \n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n assert in_channels == in_channels_t\n \n return conv2d((inp_, kernel, stride, padding))\n # return conv2d_mul(inp_, kernel, stride, padding)", "def noiseReduction(self):\n pass", "def forward(self, state):\n output = self.conv_layers(state)\n output = output.view(-1, 7*7*64)\n output = self.fc(output)\n return output", "def set_idemix_parameter(state):\n vs = state.variables\n settings = state.settings\n\n bN0 = (\n npx.sum(\n npx.sqrt(npx.maximum(0.0, vs.Nsqr[:, :, :-1, vs.tau]))\n * vs.dzw[npx.newaxis, npx.newaxis, :-1]\n * vs.maskW[:, :, :-1],\n axis=2,\n )\n + npx.sqrt(npx.maximum(0.0, vs.Nsqr[:, :, -1, vs.tau])) * 0.5 * vs.dzw[-1:] * vs.maskW[:, :, -1]\n )\n fxa = npx.sqrt(npx.maximum(0.0, vs.Nsqr[..., vs.tau])) / (1e-22 + npx.abs(vs.coriolis_t[..., npx.newaxis]))\n\n cstar = npx.maximum(1e-2, bN0[:, :, npx.newaxis] / (settings.pi * settings.jstar))\n\n vs.c0 = npx.maximum(0.0, settings.gamma * cstar * gofx2(fxa, settings.pi) * vs.maskW)\n vs.v0 = npx.maximum(0.0, settings.gamma * cstar * hofx1(fxa, settings.pi) * vs.maskW)\n vs.alpha_c = (\n npx.maximum(\n 1e-4,\n settings.mu0 * npx.arccosh(npx.maximum(1.0, fxa)) * npx.abs(vs.coriolis_t[..., npx.newaxis]) / cstar**2,\n )\n * vs.maskW\n )\n\n return KernelOutput(c0=vs.c0, v0=vs.v0, alpha_c=vs.alpha_c)", "def convolve_one_image(self,input4D, one_image, image_shape, \n Pstruct, filter_shape,\n image_index,\n channel_index): \n \n \n ## We look at the composition for the first channel in the beginning \n rank = Pstruct[0]['U1'].shape[1]\n fwidth = filter_shape[2]\n fheight = filter_shape[3]\n \n 
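\n # (added sketch, not in the original code: the rank-R CP decomposition writes the\n # 2D kernel of output filter f as K_f ~ sum_r lmbda[r] * U3[f, r] * outer(U2[:, r], U1[:, r]),\n # so one 2D convolution becomes R separable 1D passes, a horizontal pass with\n # U1[:, r] followed by a vertical pass with U2[:, r], recombined per filter below)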
\n # Construct horizontal filters\n #TODO save the filters in the correct shape\n horizontal_filter_shape = (rank, 1, fwidth)\n horizontal_filters = np.ndarray(horizontal_filter_shape)\n horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']);\n \n # Output is 1 x rank x W x H\n horizontal_conv_out = conv.conv2d(input=one_image, \n filters = horizontal_filters,\n filter_shape = horizontal_filter_shape, \n image_shape = image_shape)\n \n # Construct vertical filters\n vertical_filter_shape = (rank, fheight, 1)\n vertical_filters = np.ndarray(vertical_filter_shape) \n vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);\n\n initial_n_rows = image_shape[1]\n final_n_rows = initial_n_rows- fwidth + 1\n final_n_cols = image_shape[2] - fheight + 1 \n conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))\n for r in range(rank):\n # temp is 1x1x imageW x imageH\n A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], \n filters = vertical_filters[r,:,:],\n filter_shape = (1, fheight, 1), \n image_shape = (1, initial_n_rows, final_n_cols))\n conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])\n \n nbr_filters = Pstruct[0]['U3'].shape[0]\n # Final number of rows and columns \n ## numberof images, number of filters, image width, image height\n alphas = Pstruct[channel_index]['U3'] \n for f in range(nbr_filters): \n temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))\n for r in range(rank):\n temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; \n input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)\n return input4D", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
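\n # (added, illustrative only: the hinted call for an (N, C, H, W) batch would be\n # x_pad = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant'),\n # padding just the two spatial axes; the implementation below simply uses no padding)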
#\n ###########################################################################\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_prime = H - (HH - 1)\n W_prime = W - (WW - 1)\n out = np.zeros((N, F, H_prime, W_prime))\n \n for n in range(N):\n for f in range(F):\n for i in range(H_prime):\n for j in range(W_prime):\n out[n, f, i, j] = np.sum(x[n, :, i:i+HH, j:j+WW] * w[f])\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def embed(self, x):\r\n # shape=(batch, 1, 1024, 1)\r\n x = x[:, None, :, None]\r\n\r\n # Forward pass through first five layers\r\n x = self.layer(x, self.conv1, self.conv1_BN, (0, 0, 254, 254))\r\n x = self.layer(x, self.conv2, self.conv2_BN)\r\n x = self.layer(x, self.conv3, self.conv3_BN)\r\n x = self.layer(x, self.conv4, self.conv4_BN)\r\n x = self.layer(x, self.conv5, self.conv5_BN)\r\n\r\n return x", "def forward(self, x):\n out = self.fc1(x)\n out = out.view(-1, 196, 4, 4)\n out = self.tconv(out)\n\n return out", "def window(self):\n hanning = numpy.hanning(nFFT)\n for i in range(nFFT):\n self.cur_input[i] *= hanning[i]", "def forward(self, inp):\n return inp.dot(self.W) + self.b", "def forward(self, x):\r\n # return x.repeat_interleave(self.kernel_size, dim=1)\r\n x = x.permute(0, 2, 1)\r\n x = torch.nn.functional.interpolate(x, scale_factor=self.kernel_size, mode='nearest')\r\n return x.permute(0, 2, 1)", "def forward(ctx, inX, in_weight, in_bias=None, convparam=None):\n # note: for demo purpose, assume dilation=1 and padding_mode='zeros',\n # also assume the padding and stride is the same for ROWS and COLS, respectively\n\n if convparam is not None:\n padding, stride = convparam\n else:\n padding, stride = 0, 1\n\n nOutCh, nInCh, nKnRows, nKnCols = in_weight.shape\n nImgSamples, nInCh, nInImgRows, nInImgCols = inX.shape\n\n # determine the output shape\n nOutRows = (nInImgRows + 2 * padding - nKnRows) // stride + 1\n nOutCols = (nInImgCols + 2 * padding - nKnCols) // stride + 1\n\n ''' \n using torch.nn.functional.unfold to extract nL blocks of size of inChannels x nKnRows x nKnCols elements\n Each block can be used to do multiplication with the kernels\n Input shape: (nImgSamples, nInCh, ∗)\n Output shape: (nImgSamples, nB = nInCh X ∏(kernel_sizes), nL = nOutRows X nOutCols)\n '''\n inX_nSamp_nB_nL = torch.nn.functional.unfold(inX, (nKnRows, nKnCols), padding=padding, stride=stride)\n inX_nSamp_nL_nB = inX_nSamp_nB_nL.transpose(1, 2)\n # \"view\" won't work if some part of the tensor is not contiguous, for example, \n # when coming from torch.flip() of the original one. 
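\n # (added illustration, not in the original: t = torch.arange(6).reshape(2, 3).t()\n # is non-contiguous, so t.view(-1) raises a RuntimeError while t.reshape(-1)\n # copies the data when needed)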
\n # Therefore, \"view\" is changed to \"reshape\"\n # kn_nOutCh_nB = in_weight.view(nOutCh, -1) \n kn_nOutCh_nB = in_weight.reshape(nOutCh, -1)\n kn_nB_nOutCh = kn_nOutCh_nB.t()\n out_nSamp_nL_nOutCh = inX_nSamp_nL_nB.matmul(kn_nB_nOutCh)\n out_nSamp_nOutCh_nL = out_nSamp_nL_nOutCh.transpose(1, 2)\n out = out_nSamp_nOutCh_nL.reshape(nImgSamples, nOutCh, nOutRows, nOutCols)\n\n if in_bias is not None:\n out += in_bias.view(1, -1, 1, 1)\n\n # cache these objects for use in the backward pass\n ctx.InImgSize = (nInImgRows, nInImgCols)\n ctx.out_nSamp_nOutCh_nL_shape = out_nSamp_nOutCh_nL.shape\n ctx.inX_nSamp_nL_nB = inX_nSamp_nL_nB\n ctx.kn_nB_nOutCh = kn_nB_nOutCh\n ctx.parameters = (nOutCh, nInCh, nKnRows, nKnCols, padding, stride)\n # ctx.save_for_backward(inX_nSamp_nL_nB, kn_nB_nOutCh)\n\n return out", "def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):\n concat_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, fused=fuse)(ip)\n x = Activation('relu')(x)\n x = Conv2D(int(nb_filter * compression), (1, 1),\n kernel_initializer='he_normal', padding='same',\n use_bias=False, kernel_regularizer=l2(weight_decay))(x)\n x = AveragePooling2D((2, 2), strides=(2, 2))(x)\n\n return x", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = (self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
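\n # (added note: with zero padding p and unit stride the output height would be\n # H - HH + 1 + 2*p; the implementation below assumes p = 0, so H1 = H - HH + 1)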
#\n ###########################################################################\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n H1 = H-HH+1\n W1 = W-WW+1\n out = np.zeros([N,F,H1,W1])\n wn = np.tile(w,(N,1,1,1,1))\n all_but_first = tuple(range(out.ndim))[1:]\n for f in range(F):\n for i in range(H1):\n for j in range(W1):\n out[:,f,i,j] = np.sum(x[:,:,i:i+HH,j:j+WW] * wn[:,f], axis=all_but_first)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def Naive_forwardpass(self):\n\n for filter_k in range(0, self.n_filters):\n filter_col = self.im2col(self.filter_map[filter_k].data_mtx)\n for hgt_indx in range(0, self.Output_Height):\n for wdth_indx in range(0, self.Output_Width):\n wdth_start_index = wdth_indx * self.stride_len\n wdth_end_index= wdth_start_index + self.filter_size\n hgt_start_index = hgt_indx * self.stride_len\n hgt_end_index = hgt_start_index + self.filter_size\n trn_img_area = self.input_vol.padded_mtx[:, wdth_start_index:wdth_end_index,\n hgt_start_index:hgt_end_index]\n trn_img_col = self.im2col(trn_img_area)\n self.output_Tensor.data_mtx[filter_k,wdth_indx , hgt_indx] = self.convolution_op(trn_img_col,\n filter_col) + np.sum(self.bias_vol[filter_k].data_mtx)\n return self.output_Tensor", "def forward(self, src, mask):\n bs = src.shape[0]\n src = src.permute(2, 0, 1)\n m = src \n enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n for layer in self.encoder_layers:\n m = layer(m,\n pos=enc_embed,\n src_mask = mask\n )\n return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)", "def apply_filter_operator(self, input, filter_operator):\n\n input = input.permute(0,2,1).contiguous().view(-1,self.num_nodes)\n filter_operator = filter_operator.view(self.num_nodes, -1)\n output = torch.matmul(input, filter_operator).view(self.batch_size, self.filter_size_in, self.num_nodes, self.filter_size_out).permute(0,2,3,1)\n\n matched_mask = self.mask.unsqueeze(2).repeat(1,1,self.filter_size_out,1)\n output = output * matched_mask\n\n # Debug\n logger.debug('Filter operator with matched dimensions of spectral conv layer: {}'.format(filter_operator.shape))\n logger.debug('Output after applying filter operator on input of spectral conv layer: {}'.format(output.size()))\n\n return output", "def linconv(nx):", "def calculate_uip(vpx, raster, weight, neuron, tau):\n\n m = 1\n\n vpx[0] = weight[neuron, raster[0][\"id\"]]\n\n for k, evt in enumerate(raster[1:], 1):\n\n dt = evt[\"time\"] - raster[k - 1][\"time\"]\n\n if not dt:\n m -= 1\n else:\n vpx[m] = vpx[m - 1] * np.exp(-dt / tau)\n\n vpx[m] += weight[neuron, evt[\"id\"]]\n\n m += 1", "def idealOpAmp():", "def forward(self, x):\n n, c, t, v = x.size()\n x1 = x.view(n, c * t, v)\n y = None\n for i in range(self.num_subset):\n A1 = self.PA[i]\n z = self.conv_d[i](torch.matmul(x1, A1).view(n, c, t, v))\n y = z + y if y is not None else z\n A2 = self.cen(x)\n z2 = torch.matmul(x1, A2).view(n, c, t, v)\n z2 = self.conv_cen(z2)\n y += self.lamb * z2\n y = self.bn(y)\n y += self.down(x)\n y = self.relu(y)\n y = self.attention(y)\n return y", "def forward(self, x):\n x = x.permute(0, 3, 1, 2) # [batch_size*num_slots, features, height, width]\n x = self.dec(x)\n return x", "def forward(self, x):\n x = x.permute(0, 3, 1, 2) # [batch_size*num_slots, features, height, width]\n x = self.dec(x)\n return x", "def womyshift(hop):\n import matplotlib.pyplot as plt\n import logging\n 
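# (added note: the shift applied below is purely additive, hop[0].flux + shift,\n # so a shift of +2.0 raises every flux value by 2 while the wavelength array\n # is left untouched)\n 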
from tmath.wombat.inputter import inputter\n from tmath.wombat.wshow import wshow\n plt.cla()\n plt.plot(hop[0].wave,hop[0].flux,drawstyle='steps-mid')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.title(hop[0].obname)\n wshow()\n\n print('Routine to linearly shift flux scale\\n')\n\n shift=inputter('Enter flux shift: ','float',False)\n\n hop[0].flux=hop[0].flux+shift\n\n plt.plot(hop[0].wave,hop[0].flux,drawstyle='steps-mid')\n\n logging.debug('File {} flux scale shifted by {} A'.format\\\n (hop[0].obname,shift))\n\n #FIX header\n return hop", "def forward(self, ps):\n if self.w is None or len(self.w) != ps.size(-1):\n self.populate_w(ps)\n\n out = (ps*self.w).sum(-1)\n return out", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def forward(self, x):\n binsize = 4000\n x_padding = 112000\n x_block = Blocksize\n\n def run(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n return out7\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n\n segouts = []\n starts = np.arange(0, x.size(2), x_block)\n for start in starts:\n if start == starts[0]:\n segouts.append(\n checkpoint(run, x[:, :, start : start + x_block + x_padding], dummy)[\n :, :, : int(x_block / binsize)\n ]\n )\n elif start == starts[-1]:\n segouts.append(\n checkpoint(run, x[:, :, start - x_padding :], dummy)[\n :, :, int(x_padding / binsize) :\n ]\n )\n else:\n 
segouts.append(\n checkpoint(\n run, x[:, :, start - x_padding : start + x_block + x_padding], dummy,\n )[\n :, :, int(x_padding / binsize) : int((x_block + x_padding) / binsize),\n ]\n )\n\n out = torch.cat(segouts, 2)\n return out", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def forward(self, x):\n assert len(x.shape) == 5, \"batchconv2d expects a 5d [{}] tensor\".format(x.shape)\n b_i, b_j, c, h, w = x.shape\n out = self.conv(x.permute([1, 0, 2, 3, 4]).contiguous().view(b_j, b_i * c, h, w))\n return out.view(b_j, b_i, self.out_channels,\n out.shape[-2], out.shape[-1]).permute([1, 0, 2, 3, 4])", "def conv_forward(A_prev, W, b, activation, padding=\"same\", stride=(1, 1)):\n m, h_prev, w_prev, c_prev = A_prev.shape\n kh, kw, _, c_new = W.shape\n ph = pw = 0\n sh, sw = stride\n\n if padding == 'same':\n ph = int(((h_prev - 1) * sh + kh - h_prev) / 2)\n pw = int(((w_prev - 1) * sw + kw - w_prev) / 2)\n elif type(padding) == tuple:\n ph, pw = padding\n\n pad = np.pad(A_prev, ((0, 0), (ph, ph), (pw, pw), (0, 0)), 'constant')\n\n ch = int((h_prev + 2 * ph - kh) / sh) + 1\n cw = int((w_prev + 2 * pw - kw) / sw) + 1\n\n conv_W = np.zeros((m, ch, cw, c_new))\n\n for i in range(ch):\n for j in range(cw):\n for k in range(c_new):\n slide_img = pad[:, i * sh:i * sh + kh,\n j * sw:j * sw + kw]\n kernel = W[:, :, :, k]\n element = np.multiply(slide_img, kernel)\n conv_W[:, i, j, k] = np.sum(np.sum(np.sum(element,\n axis=1), axis=1), axis=1)\n\n Z = conv_W + b\n\n return activation(Z)", "def forward(self, x):\n # Encoder1 --block1\n encode_block1 = self.conv_encoder1(x)\n if self.residus[0] == 1:\n encode_block1 += self.residual_shortcut1(x)\n encode_pool1 = self.max_pool_encoder1(encode_block1)\n\n # Encoder2 --block2\n encode_block2 = self.conv_encoder2(encode_pool1)\n if self.residus[1] == 1:\n encode_block2 += self.residual_shortcut2(encode_pool1)\n encode_pool2 = self.max_pool_encoder2(encode_block2)\n \n # Encoder3 --block3\n encode_block3 = self.conv_encoder3(encode_pool2)\n if self.residus[2] == 1:\n encode_block3 += self.residual_shortcut3(encode_pool2) ##\n encode_pool3 = self.max_pool_encoder3(encode_block3)\n \n # Encoder4 --block4\n encode_block4 = self.conv_encoder4(encode_pool3) \n if self.residus[3] == 1:\n encode_block4 += self.residual_shortcut4(encode_pool3) ##\n encode_pool4 = self.max_pool_encoder4(encode_block4)\n\n # Transitional block \n encode_block_trans_1 = self.conv_encoder_trans_1(encode_pool4)\n encode_block_trans_1 = self.conv_encoder_trans_2(encode_block_trans_1)\n encode_block_trans_1 = self.conv_encoder_trans_3(encode_block_trans_1)\n \n encode_block_trans_1 = torch.cat((encode_pool4, encode_block_trans_1), 1) # Concatenation\n \n encode_block_trans_2 = self.conv_encoder_trans_4(encode_block_trans_1)\n encode_block_trans_2 = self.conv_encoder_trans_5(encode_block_trans_2)\n encode_block_trans_2 = self.conv_encoder_trans_6(encode_block_trans_2)\n\n middle_block = torch.cat((encode_block_trans_1, encode_block_trans_2), 1) # Concatenation\n \n convTranspose_transitional = self.convTranspose_transitional(middle_block) \n # Decoder4 --block5\n decode_block4 = torch.cat((convTranspose_transitional, encode_block4), 1) \n if self.residus[4] == 1:\n decode_block4 += 
self.residual_shortcut_encoder_decoder4(encode_block4)\n\n #--block6\n cat_layer3 = self.conv_decoder4(decode_block4) \n if self.residus[5] == 1:\n cat_layer3 += self.residual_shortcut_decoder4(decode_block4)\n convTranspose_decoder4 = self.convTranspose_decoder4(cat_layer3)\n \n \n # Decoder3 --block7\n decode_block3 = torch.cat((convTranspose_decoder4, encode_block3), 1)\n if self.residus[6] == 1:\n decode_block3 += self.residual_shortcut_encoder_decoder3(encode_block3)\n \n #--block8\n cat_layer2 = self.conv_decoder3(decode_block3) \n if self.residus[7] == 1:\n cat_layer2 += self.residual_shortcut_decoder3(decode_block3)\n convTranspose_decoder3 = self.convTranspose_decoder3(cat_layer2)\n \n # Decoder2 --block9\n decode_block2 = torch.cat((convTranspose_decoder3, encode_block2), 1) \n if self.residus[8] == 1:\n decode_block2 += self.residual_shortcut_encoder_decoder2(encode_block2)\n \n #--block10\n cat_layer1 = self.conv_decoder2(decode_block2)\n if self.residus[9] == 1:\n cat_layer1 += self.residual_shortcut_decoder2(decode_block2)\n convTranspose_decoder2 = self.convTranspose_decoder2(cat_layer1)\n \n # Decoder1 --block11\n decode_block1 = torch.cat((convTranspose_decoder2, encode_block1), 1) \n if self.residus[10] == 1:\n decode_block1 += self.residual_shortcut_encoder_decoder1(encode_block1)\n \n #--block12\n final_layer = self.final_layer(decode_block1)\n if self.residus[11] == 1:\n final_layer += self.residual_shortcut_final_layer(decode_block1)\n \n \n return final_layer", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PowerSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] = self.spectrum[:, 1] * (self.spectrum[:, 0] * 1e-9 / (constants.c * constants.h))\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def _step1_optimization_closure(self, iteration, step):\n if iteration == self.num_iter_first_step - 1:\n reg_noise_std = 0\n else:\n reg_noise_std = (1 / 1000.) 
* (iteration // 300) # TODO: make it dependant in the max number of iterations\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_first_step - 1:\n aug = 0\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_net_input = self.clean_net_inputs[aug] + (self.clean_net_inputs[aug].clone().normal_() * reg_noise_std)\n # watermark_net_input = self.watermark_net_inputs[aug] # + (self.watermark_net_input.clone().normal_())\n # mask_net_input = self.mask_net_inputs[aug]\n # applies the nets\n self.clean_net_output = self.clean_net(clean_net_input)\n self.total_loss = 0\n self.blur = 0\n self.total_loss += self.extended_l1_loss(self.clean_net_output,\n self.image_torchs[aug],\n (1 - self.watermark_hint_torchs[aug]))\n self.total_loss.backward(retain_graph=True)", "def pass_through_lateral_conn(self):\n\n if self.conv_filter is not None:\n boundary = 'wrap' if self.circular else 'fill'\n self.P = convolve2d(self.P, self.conv_filter, 'same', boundary)\n\n self.P = self.P / self.P.sum() # rescale to PD", "def forward(self, x):\n #print(\"full frwd x shape:\",x.shape)\n y=np.zeros((x.shape[0],self.W.shape[0]))\n y=np.dot(x,np.transpose(self.W))+self.b\n self.x=np.copy(x)\n return y", "def forward(self, x):\r\n\r\n return torch.conv2d(input=x,\r\n weight=self.weight * self.scale, # scale the weight on runtime\r\n bias=self.bias if self.use_bias else None,\r\n stride=self.stride,\r\n padding=self.pad)", "def dir_conv_layer(model, nb_filters, rate):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', dilation_rate=rate, activation='relu')(model)\n model = BatchNormalization()(model)\n\n # exponentially increase dilated convolution receptive field\n # receptive field size loops back around when rate = 16 to create [1...8] block\n rate *= 2\n if rate == 16:\n rate = 1\n return model, rate", "def miccai2018_net_t1t2(vol_size, enc_nf, dec_nf, int_steps=7, use_miccai_int=False, indexing='ij', bidir=False, vel_resize=1/2): \n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n\n # get unet\n unet_model_channel1 = unet_core(vol_size, enc_nf, dec_nf, full_size=False)\n [srcT1, tgtT1] = unet_model_channel1.inputs\n x_out_T1 = unet_model_channel1.outputs[-1]\n unet_model_channel2 = unet_core(vol_size, enc_nf, dec_nf, full_size=False)\n [srcT2, tgtT2] = unet_model_channel2.inputs\n x_out_T2 = unet_model_channel2.outputs[-1]\n\n # velocity mean and logsigma layers\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow_mean_T1 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow_T1')(x_out_T1)\n # we're going to initialize the velocity variance very low, to start stable.\n flow_log_sigma_T1 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma_T1')(x_out_T1)\n flow_params_T1 = concatenate([flow_mean_T1, flow_log_sigma_T1])\n flow_T1 = Sample(name=\"z_sample_T1\")([flow_mean_T1, flow_log_sigma_T1])\n \n # integrate if diffeomorphic (i.e. 
treating 'flow' above as stationary velocity field)\n if use_miccai_int:\n # for the miccai2018 submission, the squaring layer\n # scaling was essentially built in by the network\n # was manually composed of a Transform and and Add Layer.\n v = flow_T1\n for _ in range(int_steps):\n v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])\n v = keras.layers.add([v, v1])\n flow_T1 = v\n\n else:\n # new implementation in neuron is cleaner.\n z_sample = flow_T1\n flow_T1 = nrn_layers.VecInt(method='ss', name='flow-int_T1', int_steps=int_steps)(z_sample)\n if bidir:\n rev_z_sample = Negate()(z_sample)\n neg_flow_T1 = nrn_layers.VecInt(method='ss', name='neg_flow-int_T1', int_steps=int_steps)(rev_z_sample)\n # get up to final resolution\n flow_T1 = trf_resize(flow_T1, vel_resize, name='diffflow_T1')\n if bidir:\n neg_flow_T1 = trf_resize(neg_flow_T1, vel_resize, name='neg_diffflow_T1')\n # transform\n y_T1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT1, flow_T1])\n if bidir:\n y_tgt_T1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgtT1, neg_flow_T1])\n \n flow_mean_T2 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow_T2')(x_out_T2)\n # we're going to initialize the velocity variance very low, to start stable.\n flow_log_sigma_T2 = Conv(ndims, kernel_size=3, padding='same',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),\n bias_initializer=keras.initializers.Constant(value=-10),\n name='log_sigma_T2')(x_out_T2)\n flow_params_T2 = concatenate([flow_mean_T2, flow_log_sigma_T2])\n flow_T2 = Sample(name=\"z_sample_T2\")([flow_mean_T2, flow_log_sigma_T2])\n # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)\n if use_miccai_int:\n # for the miccai2018 submission, the squaring layer\n # scaling was essentially built in by the network\n # was manually composed of a Transform and and Add Layer.\n v = flow_T2\n for _ in range(int_steps):\n v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])\n v = keras.layers.add([v, v1])\n flow_T2 = v\n\n else:\n # new implementation in neuron is cleaner.\n z_sample = flow_T2\n flow_T2 = nrn_layers.VecInt(method='ss', name='flow-int_T2', int_steps=int_steps)(z_sample)\n if bidir:\n rev_z_sample = Negate()(z_sample)\n neg_flow_T2 = nrn_layers.VecInt(method='ss', name='neg_flow-int_T2', int_steps=int_steps)(rev_z_sample)\n # get up to final resolution\n flow_T2 = trf_resize(flow_T2, vel_resize, name='diffflow_T2')\n if bidir:\n neg_flow_T2 = trf_resize(neg_flow_T2, vel_resize, name='neg_diffflow_T2')\n # transform\n y_T2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT2, flow_T2])\n if bidir:\n y_tgt_T2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgtT2, neg_flow_T2])\n\n # velocity sample\n flow_params = MergeInputs3D()([flow_params_T1, flow_params_T2])\n ndims = len(flow_params.get_shape()) - 2\n flow_mean = flow_params[..., 0:ndims]\n flow_log_sigma = flow_params[..., ndims:]\n flow = Sample(name=\"z_sample\")([flow_mean, flow_log_sigma])\n\n # integrate if diffeomorphic (i.e. 
treating 'flow' above as stationary velocity field)\n if use_miccai_int:\n # for the miccai2018 submission, the squaring layer\n # scaling was essentially built in by the network\n # was manually composed of a Transform and and Add Layer.\n v = flow\n for _ in range(int_steps):\n v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])\n v = keras.layers.add([v, v1])\n flow = v\n\n else:\n # new implementation in neuron is cleaner.\n z_sample = flow\n flow = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=int_steps)(z_sample)\n if bidir:\n rev_z_sample = Negate()(z_sample)\n neg_flow = nrn_layers.VecInt(method='ss', name='neg_flow-int', int_steps=int_steps)(rev_z_sample)\n\n # get up to final resolution\n flow = trf_resize(flow, vel_resize, name='diffflow')\n\n if bidir:\n neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')\n\n # transform\n y_T1_byflow = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT1, flow])\n y_T2_byflow = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT2, flow])\n if bidir:\n y_tgt_T1_bynegflow = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgtT1, neg_flow])\n y_tgt_T2_bynegflow = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([tgtT2, neg_flow])\n\n # prepare outputs and losses\n outputs = [y_T1_byflow,y_T2_byflow, flow_params, y_T1, y_T2]\n if bidir:\n outputs = [y_T1_byflow,y_T1_byflow,y_tgt_T1_bynegflow, y_tgt_T2_bynegflow, flow_params,y_T1, y_tgt_T1,y_T2, y_tgt_T2]\n # build the model\n return Model(inputs=[srcT1, tgtT1, srcT2, tgtT2], outputs=outputs)", "def _delayandsum(data, offsets, ifactor2, steeramp, out):\n gridsize, numchannels = offsets.shape\n for gi in nb.prange(gridsize):\n out[gi] = 0\n for mi in range(numchannels):\n ind = offsets[gi,mi]\n out[gi] += (data[ind,mi] * (1-ifactor2[gi,mi]) \\\n + data[ind+1,mi] * ifactor2[gi,mi]) * steeramp[gi,mi]", "def _cconvolve(x, H, nfft, wlen, axis):\n \n # pad with wlen-1 zeros for overlap & FFT\n x = pad_along_axis(x, [0, wlen - 1], axis=axis)\n xf = np.fft.rfft(x, nfft, axis=axis)\n \n # take product with window in freq. domain\n product = multiply_along_axis(xf, H, axis=axis)\n\n # back transform to sample domain and return\n return np.fft.irfft(product, axis=axis).real", "def conv_forward_naive(x, w, b, conv_param):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n ###########################################################################\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Use np.pad for zero padding of the input.\n #Save shape of input data and filters.\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n x = np.pad(x,[(0,0),(0,0),(1,1),(1,1)],mode = 'constant')\n #Convolve each filter to create the activation maps.\n '''Compute activation maps size.First dimension:number of training examples.\n Second dimension:depth is as the number of filters.\n Width and height will be computed based on the equation that we showed in the lectures.\n The equation :(W - F + 2P)/S + 1 where:\n -W:input size.\n -F:receptive field(number of filters).\n -P: padding size.\n -S: the stride that we use.\n '''\n out_width = int((W - WW + 2 * pad) / (stride) + 1)\n out_height = int((H - HH + 2 * pad) / (stride) + 1)\n out = np.zeros((N,F,out_height,out_width))\n #Compute the activation maps for each one of the N training examples.\n for t in range(N):\n curr_x = x[t,:,:,:]\n #Loop over each filter.\n for k in range(F):\n curr_filter = w[k,:,:,:]\n #Go over all valid spots in current training example.\n out_i = 0\n for i in range(0,x.shape[2] - HH + 1,stride):\n out_j = 0\n for j in range(0,x.shape[3] - WW + 1,stride):\n #Compute dot product in current spot.\n dot_product = np.sum(curr_filter * curr_x[:,i:(i + HH),j:(j + WW)])\n out[t,k,out_i,out_j] = dot_product \\\n + b[k]\n #Increment out_j\n out_j += 1\n out_i += 1\n\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b, conv_param)\n return out, cache", "def Focus_beam(Collimated_Pupil, pad_width = 0):\n\n Collimated_Pupil_padded = np.pad(Collimated_Pupil,pad_width=int(pad_width),mode='constant') \n\n f = np.fft.fft2(Collimated_Pupil_padded) #must be complex amplitude going in here\n fshift = np.fft.fftshift(f)\n intensity_image = (np.abs(fshift))**2\n \n return intensity_image", "def offset(freqs, re0, im0):\n return re0 + 1j * im0", "def step(self, chunk: th.Tensor) -> th.Tensor:\n for conv1d in self.enc_layers:\n chunk = conv1d(chunk)\n return chunk", "def intrinsic_impedance(self,freq):\n if freq == 0:\n return cmath.sqrt(self.mu/self.eps)\n else:\n gamma = self.propagation_constant(freq)\n w = 2*np.pi*freq\n return 1j*w*self.mu/gamma", "def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3", "def conv_forward(A_prev, W, b, activation,\n padding=\"same\", stride=(1, 1)):\n m, h_prev, w_prev, c_prev = A_prev.shape\n kh, kw, c_prev, c_new = W.shape\n sh, sw = stride\n\n if padding == 'same':\n pad_h = int(((h_prev * (sh - 1)) - sh + kh) / 2)\n pad_w = int(((w_prev * (sw - 1)) - sw + kw) / 2)\n elif type(padding) == tuple:\n pad_h, pad_w = padding\n else:\n pad_h = 0\n pad_w = 0\n img_pad = np.pad(A_prev, ((0, 0), (pad_h, pad_h),\n (pad_w, pad_w), (0, 0)),\n 'constant', constant_values=(0))\n img_pad_h = img_pad.shape[1]\n img_pad_w = img_pad.shape[2]\n h_out = int((img_pad_h - kh) / sh) + 1\n w_out = int((img_pad_w - kw) / sw) + 1\n result = np.zeros((m, h_out, w_out, c_new))\n for i in range(h_out):\n for j in range(w_out):\n for k in range(c_new):\n result[:, i, j, k] = np.sum(img_pad[:,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw] *\n W[:, :, :, k],\n axis=(1, 2, 3))\n return activation(result + b)", "def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n x = self.upsample(x)\n return x", "def forward(self, x, *args):\n 
# return x.repeat_interleave(self.kernel_size, dim=1)\n x = x.permute(0, 2, 1)\n x = torch.nn.functional.interpolate(x, scale_factor=self.kernel_size, mode='nearest')\n return x.permute(0, 2, 1)", "def transition_layer(X, nb_filters, compression):\n output = K.layers.BatchNormalization()(X)\n output = K.layers.Activation('relu')(output)\n output = K.layers.Conv2D(int(nb_filters * compression), 1,\n kernel_initializer='he_normal')(output)\n\n # transition layer\n X = K.layers.AvgPool2D(2)(output)\n # number of filters within the output\n nb_filters = int(nb_filters * compression)\n return X, nb_filters", "def __call__(self, x):\n if self._padding != 'REFLECT':\n padding = self._padding\n else:\n padding = 'VALID'\n pad_top = self.w[0] // 2\n pad_left = self.w[1] // 2\n if (self.w[0] - self._stride[1]) % 2 == 0:\n pad_bottom = pad_top\n else:\n pad_bottom = self.w[0] - self._stride[1] - pad_top\n if (self.w[1] - self._stride[2]) % 2 == 0:\n pad_right = pad_left\n else:\n pad_right = self.w[1] - self._stride[2] - pad_left\n x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right],\n [0, 0]], mode='REFLECT')\n y = tf.nn.conv2d(x, self._weight, strides=self._stride, padding=padding)\n return self._pre_scale * y + self._bias", "def forward(self, x):\n\n x = torch.unsqueeze(x, dim=1)\n x = F.pad(x, self.pad, mode=self.padmode, value=self.padvalue)\n x = self.conv(x, weight=self.weight.to(x.device), groups=self.groups)\n return torch.squeeze(x, dim=1)", "def _fp32_vnchwconv_process(axis_0_index, h_loop_idx, h_size):\n\n def _fp32_inner_vnchwconv(col_lp_idx, col_size):\n \"\"\"\n inner vnchwconv\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size + col_lp_idx * max_sub_w_size +\n h_loop_idx * max_sub_h_size * axis_2 +\n axis_0_index * axis_1 * axis_2)\n data_in_info = (h_size, col_size, axis_1, axis_2, in_offset)\n _data_move_in_mc_on_w(tik_inst, ub_input, data_in, data_in_info)\n\n # for this case, data_move will move in one more block\n with tik_inst.new_stmt_scope():\n h_size_temp = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(tik.all(axis_1 > data_size_one_block,\n h_size % data_size_one_block > 0)):\n h_size_temp.set_as(_ceil_div(h_size, data_size_one_block) *\n data_size_one_block)\n with tik_inst.else_scope():\n h_size_temp.set_as(h_size)\n # transpose by vnchwconv\n sub_hw_size = (h_size_temp, col_size)\n _transpose_by_2_vnchwconv(tik_inst, ub_input[ub_offset],\n ub_input, sub_hw_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size + col_lp_idx * max_sub_w_size) *\n axis_1 + h_loop_idx * max_sub_h_size +\n axis_0_index * axis_1 * axis_2)\n data_out_info = (h_size, col_size, axis_1, axis_2, out_offset)\n _data_move_out_mc_on_w(tik_inst, data_out, ub_input[ub_offset], data_out_info)\n\n with tik_inst.for_range(0, loop_cnt) as lp_idx:\n _fp32_inner_vnchwconv(lp_idx, max_sub_w_size)\n with tik_inst.if_scope(left_size > 0):\n _fp32_inner_vnchwconv(loop_cnt, left_size)", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PhotocurrentSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] *= constants.e\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def forward(self, x):\n\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.activation(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.upsample is not None:\n identity = self.upsample(x)\n\n out += identity\n out = self.activation(out)\n\n return out", "def 
upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda', gpu=True):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n\r\n # Check weight shape.\r\n w = tf.convert_to_tensor(w)\r\n assert w.shape.rank == 4\r\n convH = w.shape[0]\r\n convW = w.shape[1]\r\n inC = Oncuda._shape(w, 2)\r\n outC = Oncuda._shape(w, 3)\r\n assert convW == convH\r\n\r\n # Setup filter kernel.\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * (gain * (factor ** 2))\r\n p = (k.shape[0] - factor) - (convW - 1)\r\n\r\n # Determine data dimensions.\r\n if data_format == 'NCHW':\r\n stride = [1, 1, factor, factor]\r\n output_shape = [Oncuda._shape(x, 0), outC, (Oncuda._shape(x, 2) - 1) * factor + convH, (Oncuda._shape(x, 3) - 1) * factor + convW]\r\n num_groups = Oncuda._shape(x, 1) // inC\r\n else:\r\n stride = [1, factor, factor, 1]\r\n output_shape = [Oncuda._shape(x, 0), (Oncuda._shape(x, 1) - 1) * factor + convH, (Oncuda._shape(x, 2) - 1) * factor + convW, outC]\r\n num_groups = Oncuda._shape(x, 3) // inC\r\n\r\n # Transpose weights.\r\n w = tf.reshape(w, [convH, convW, inC, num_groups, -1])\r\n w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])\r\n w = tf.reshape(w, [convH, convW, -1, num_groups * inC])\r\n\r\n # Execute.\r\n x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)\r\n return Oncuda._simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl, gpu=gpu)", "def conv_layer(self, index, inputMatrix, numOfFilters, sizeOfFilter,\n stride):\n numOfChannels = inputMatrix.get_shape()[3]\n # int with numberOfChannels\n weight = tf.Variable(tf.truncated_normal(\n [sizeOfFilter, sizeOfFilter, int(numOfChannels), numOfFilters],\n stddev=0.1))\n bias = tf.Variable(tf.constant(0.1, shape=[numOfFilters]))\n padSize = sizeOfFilter // 2\n paddedInput = tf.pad(\n inputMatrix, ([[0, 0], [padSize, padSize], [padSize, padSize],\n [0, 0]]))\n conv = tf.nn.conv2d(paddedInput, weight, strides=[\n 1, stride, stride, 1], padding='VALID',\n name=str(index) + '_conv')\n conv_bias = tf.add(conv, bias, name=str(index) + '_conv')\n if self.verbose:\n print(' Layer %d Type: Conv Size: %dx%d Stride: %d No.Filters: %d '\n 'Input Channels : %d' % (index, sizeOfFilter, sizeOfFilter,\n stride, numOfFilters, numOfChannels))\n # leaky relu as mentioned in YOLO paper\n return tf.maximum(self.leakyReLUAlpha * conv_bias, conv_bias,\n name=str(index) + '_leaky_relu')", "def forward_pass(self, x):\n self.x = x\n # add bias\n x_w_ones = np.append(np.ones([x.shape[0], 1]), x, 1)\n w_b = np.concatenate((self.b, self.w))\n self.a = x_w_ones @ w_b\n \n # print(\"shape of x: \", x.shape, \"shape of x_w_ones: \", x_w_ones.shape)\n # print(\"shape of w: \", self.w.shape, \"shape of w_b: \", w_b.shape, \"shape of bias: \", self.b.shape)\n # print(\"shape of a: \", self.a.shape)\n \n return self.a", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, 
-1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def askapsoft_decimate_n_extract(af, over_sampling, kernel_support):\n\n # why is this normalization required..?\n rescale = over_sampling*over_sampling\n #rescale = 1\n\n cSize = 2 * kernel_support + 1\n itsConvFunc=np.zeros((over_sampling, over_sampling, cSize, cSize), dtype=complex)\n\n for fracu in range(0,over_sampling):\n for fracv in range(0,over_sampling):\n\n # Now cut out the inner part of the convolution function and\n # insert it into the convolution function\n for iy in range(-kernel_support,kernel_support+1):\n for ix in range(-kernel_support,kernel_support+1):\n\n nx = af.shape[0]\n ny = af.shape[1]\n\n # assumes support is the same for all w-planes:\n xval = (ix) * over_sampling + fracu + nx / 2\n yval = (iy) * over_sampling + fracv + ny / 2\n\n itsConvFunc[fracu, fracv, ix+cSize/2, iy+cSize/2] \\\n = rescale * af[xval, yval]\n\n return itsConvFunc[::-1,::-1]", "def forward(self, inp):\n outp = []\n x = inp\n if self.resize_input:\n x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)\n if self.normalize_input:\n x = 2 * x - 1\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\n return outp", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def setup_forward(self, W, input_data, prefix=\"\"):\n \n def loop_body(i, activations, outputcollect):\n \n if self.config['sequence_input']:\n # Cut out the correct input\n if self.config['net_input_add_onehot']:\n inp = tf.slice(input_data, (0,i), (self.config['batch_size'], 1), name=prefix+\"/inputSlice\") # <batch_size, 1>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size>\n inp = tf.one_hot(indices=inp, depth=self.config['num_input']) # <batch_size, num_input>\n else:\n inp = tf.slice(input_data, (0,i,0), (self.config['batch_size'], 1, self.config['num_input']), name=prefix+\"/inputSlice\") # <batch_size, 1, num_input>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size, num_input>\n else:\n inp = input_data\n inp = self.setup_print(inp, \"input data\")\n \n # Concatenate input, bias, activations\n inp = tf.concat([inp, self.bias, activations], axis=1, name=prefix+\"/stepconcat\") # <batch_size, from>\n inp = tf.expand_dims(inp, 1) # <batch_size, 1, from>\n \n # Fully connected\n # <batch_size, 1, to> <= <batch_size, 1, from> @ <batch_size, from, to>\n activations = tf.matmul(inp, W, 
name=prefix+\"/stepmatmul\")\n activations = tf.squeeze(activations, 1) # <batch_size, to>\n \n # Leaky ReLU\n # This allows values to blow up\n ## activations = tf.maximum(activations, activations * .3, name=prefix+\"/lrelu\")\n \n # Sigmoid\n activations = tf.sigmoid(activations) # <batch_size, to>\n \n # Store the output if we need outputs from all timesteps\n # Alternative may be: https://stackoverflow.com/questions/39157723/how-to-do-slice-assignment-in-tensorflow/43139565#43139565\n if self.config['sequence_output']:\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n output = tf.expand_dims(output, axis=1) # <batch_size, 1, output>\n outputcollect = tf.concat([outputcollect, output], axis=1)\n \n return tf.add(i,1), activations, outputcollect\n \n loop_out = tf.while_loop(\n cond=(lambda\n i, \n activations,\n outputcollect:\n tf.less(i, self.config['timesteps'])\n ),\n body=loop_body,\n loop_vars=[\n self.initial_i,\n self.initial_activations,\n self.initial_output\n ],\n shape_invariants=[\n self.initial_i.get_shape(),\n self.initial_activations.get_shape(),\n tf.TensorShape([self.config['batch_size'], None, self.config['num_output']])\n ],\n back_prop=False,\n # return_same_structure=True,\n name=prefix+\"/loop\"\n )\n \n # Get the output\n if self.config['sequence_output']:\n output = loop_out[2]\n # Set shape otherwise broadcasting messes this up\n output.set_shape((self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n activations = loop_out[1] # <batch_size, to>\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n\n if self.config['net_add_softmax']:\n # tf.nn.softmax\n output = tf.exp(output) / tf.expand_dims(tf.reduce_sum(tf.exp(output), axis=-1), axis=-1)\n \n return output", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur" ]
[ "0.6042097", "0.5845206", "0.5728535", "0.57079965", "0.5538608", "0.5477923", "0.54210114", "0.5398492", "0.53849536", "0.5383139", "0.5380405", "0.5379147", "0.5379049", "0.5376933", "0.53721017", "0.535263", "0.5341408", "0.53101605", "0.5305929", "0.5297572", "0.5297235", "0.52834666", "0.5278229", "0.5273604", "0.52632433", "0.5260427", "0.52445674", "0.5243293", "0.52253306", "0.5212236", "0.5209429", "0.5203469", "0.5196634", "0.5190412", "0.51901746", "0.5188828", "0.5188361", "0.5178833", "0.5177231", "0.5173874", "0.51590246", "0.5113539", "0.51070476", "0.5099884", "0.5095701", "0.50950444", "0.5089922", "0.5086108", "0.5084554", "0.5073007", "0.5071252", "0.5069943", "0.506281", "0.5062312", "0.5047965", "0.503975", "0.50304663", "0.50304663", "0.50272477", "0.50254714", "0.5025155", "0.50201523", "0.5015116", "0.5010424", "0.50102556", "0.50097966", "0.50068074", "0.5000328", "0.49993163", "0.4997004", "0.49949652", "0.4994734", "0.49901867", "0.4987748", "0.49874845", "0.49862376", "0.49833587", "0.49807435", "0.4979773", "0.49784017", "0.49781796", "0.4971739", "0.49697715", "0.49669972", "0.49641755", "0.4961138", "0.49580255", "0.49559623", "0.4951293", "0.49510357", "0.49486837", "0.4945944", "0.4940646", "0.49405175", "0.49394456", "0.49381903", "0.49375066", "0.49366188", "0.49356174", "0.4934111", "0.49312592" ]
0.0
-1
Convolution code adapted from Pedro's code and sped up with np mask logic
def convolution_nir(wav, flux, chip, R, FWHM_lim=5.0, plot=True):
    wav_chip, flux_chip = chip_selector(wav, flux, chip)
    # we need to calculate the FWHM at this value in order to set the starting point for the convolution
    FWHM_min = wav_chip[0]/R   # FWHM at the extremes of vector
    FWHM_max = wav_chip[-1]/R
    # wide wavelength bin for the resolution_convolution
    wav_extended, flux_extended = fast_wav_selector(wav, flux, wav_chip[0]-FWHM_lim*FWHM_min, wav_chip[-1]+FWHM_lim*FWHM_max)
    print("wav_extended type", type(wav_extended))
    wav_extended = np.array(wav_extended, dtype="float64")
    print("wav_extended type after arrayed", type(wav_extended))  # should be the same.
    flux_extended = np.array(flux_extended, dtype="float64")
    print("Starting the Resolution convolution...")
    # Predefine np array space
    flux_conv_res = np.empty_like(wav_chip, dtype="float64")
    counter = 0
    base_val = len(wav_chip)//20  # Adjust here to change % between reports
    for n, wav_val in enumerate(wav_chip):  # renamed from `wav` to avoid shadowing the argument
        # put value directly into the array
        flux_conv_res[n] = fast_convolve(wav_val, R, wav_extended, flux_extended, FWHM_lim)
        if n % base_val == 0:
            counter = counter + 5
            print("Resolution Convolution at {}%...".format(counter))
    print("flux conv res type after loop", type(flux_conv_res))
    flux_conv_res = np.array(flux_conv_res, dtype="float64")
    print("flux conv res type after np.array", type(flux_conv_res))
    print("Done.\n")
    if plot:
        plt.figure(1)
        plt.xlabel(r"wavelength [$\mu$m]")
        plt.ylabel(r"flux [counts]")
        plt.plot(wav_chip, flux_chip/np.max(flux_chip), color='k', linestyle="-", label="Original spectrum")
        plt.plot(wav_chip, flux_conv_res/np.max(flux_conv_res), color='b', linestyle="-", label="Spectrum observed at R=%d" % R)
        plt.legend(loc='best')
        plt.show()
    return wav_chip, flux_conv_res
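The helpers `chip_selector`, `fast_wav_selector`, and `fast_convolve` are not included in this row. As a minimal sketch of the "np mask logic" the query refers to, `fast_convolve` might select each wavelength's window with a boolean mask instead of a Python loop; the Gaussian kernel form and its unit-area normalisation below are assumptions, not taken from this source:

import numpy as np

def fast_convolve(wav_val, R, wav_extended, flux_extended, fwhm_lim):
    """Hypothetical sketch: convolve the flux at one wavelength with a
    Gaussian of FWHM = wav_val / R, using a boolean mask to pick out the
    +/- fwhm_lim * FWHM window around wav_val."""
    fwhm = wav_val / R
    # mask logic: keep only the samples inside the kernel's support
    mask = (wav_extended > wav_val - fwhm_lim * fwhm) & \
           (wav_extended < wav_val + fwhm_lim * fwhm)
    flux_2convolve = flux_extended[mask]
    # unit-area Gaussian kernel centred on wav_val (assumed form)
    sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
    kernel = np.exp(-0.5 * ((wav_extended[mask] - wav_val) / sigma) ** 2)
    kernel /= kernel.sum()
    return np.sum(kernel * flux_2convolve)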
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clConvolution(self, size, mask):", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n #start = time.time()\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]]).astype(float32)\r\n osh = output.shape\r\n\r\n assert c_kernel.conv2d_c(get_pointer(input), ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d\") \r\n #end = time.time()\r\n\r\n #print(end - start) \r\n return output\r\n \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n print(input[m,strides[1]*i+di,strides[2]*j+dj,:])\r\n print(filter[di,dj,:,:])\r\n t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\r\n output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n #print(\"type(output)\")\r\n #print(type(output))\r\n return output\r\n '''", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def MyConvolve(img, ff):\n result = np.zeros(img.shape)\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n ff = np.flipud(np.fliplr(ff)) # Flip filters\n\n # Apply filter to pixels\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n # Left column\n top_left = img[x - 1, y - 1] * ff[0, 0]\n left = img[x, y - 1] * ff[1, 0]\n btm_left = img[x + 1, y - 1] * ff[2, 0]\n # Middle column\n top = img[x - 1, y] * ff[0, 1]\n middle = img[x, y] * ff[1, 1]\n btm = img[x + 1, y] * ff[2, 1]\n # Right column\n top_right = img[x - 1, y + 1] * ff[0, 2]\n right = img[x, y + 1] * ff[1, 2]\n btm_right = img[x + 1, y + 1] * ff[2, 2]\n\n result[x, y] = top_left + left + btm_left + top + middle + btm + top_right + right + btm_right\n\n return result", "def convolve(self, kernel):\n kernel_rows, kernel_cols = kernel.shape\n img_rows, img_cols = self.img_array.shape\n\n print(\"imgae shape: \", self.img_array.shape)\n print(self.img_array[:10,:10])\n\n # flip the 
kernel\n flipped_kernel = np.zeros(kernel.shape)\n \n ## column flips\n for i in range(flipped_kernel.shape[1]):\n flipped_kernel[:,i] = kernel[:,kernel_cols-i-1]\n kernel = flipped_kernel.copy()\n\n ## row flips\n for i in range(flipped_kernel.shape[0]):\n flipped_kernel[i,:] = kernel[kernel_rows-i-1,:]\n kernel = flipped_kernel.copy()\n print(\"Flipped kernel:\\n\", kernel)\n\n # Handle broders by padding the image with white pixels.\n ## padwidth = kernel_rows // 2 \n padwidth = kernel_rows // 2\n self.img_array_padded = np.pad(self.img_array, padwidth, \n mode='constant', constant_values=255)\n \n # cross correlation\n self.img_array_out = np.zeros(self.img_array.shape)\n\n for y in range(img_cols):\n for x in range(img_rows):\n self.img_array_out[x, y] = \\\n (kernel * self.img_array_padded[x:x+kernel_cols, y:y+kernel_rows]).sum()\n \n # print(self.img_array_out.shape)\n # print(self.img_array_out[:10,:10])\n return self.img_array_out", "def convolve_grayscale_same(images, kernel):\n\n # num images\n n_images = images.shape[0]\n\n # input_width and input_height\n i_h = images.shape[1]\n i_w = images.shape[2]\n\n # kernel_width and kernel_height\n\n k_h = kernel.shape[0]\n k_w = kernel.shape[1]\n\n # pad_h ⊛ = int (k_h - 1)/2\n # pad_w ⊛ = int (k_w - 1)/2\n p_h = int((k_h - 1) / 2)\n p_w = int((k_w - 1) / 2)\n\n if k_h % 2 == 0:\n p_h = int(k_h / 2)\n\n if k_w % 2 == 0:\n p_w = int(k_w / 2)\n\n # output_height and output_width\n # H = i_h + 2pad - k_h + 1, W = i_w + 2pad - k_w + 1\n o_h = i_h + 2 * p_h - k_h + 1\n o_w = i_w + 2 * p_w - k_w + 1\n\n if k_h % 2 == 0:\n o_h = i_h + 2 * p_h - k_h\n\n if k_w % 2 == 0:\n o_w = i_w + 2 * p_w - k_w\n\n # creating outputs of size: n_images, o_h x o_w\n outputs = np.zeros((n_images, o_h, o_w))\n\n # creating pad of zeros around the output images\n padded_imgs = np.pad(images,\n pad_width=((0, 0), (p_h, p_h), (p_w, p_w)),\n mode=\"constant\",\n constant_values=0)\n\n # vectorizing the n_images into an array\n imgs_arr = np.arange(0, n_images)\n\n # iterating over the output array and generating the convolution\n for x in range(o_h):\n for y in range(o_w):\n x1 = x + k_h\n y1 = y + k_w\n outputs[imgs_arr, x, y] = np.sum(np.multiply(\n padded_imgs[imgs_arr, x: x1, y: y1], kernel), axis=(1, 2))\n\n return outputs", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n #ans = np.zeros(input_vals[0].shape)\r\n #assert len(input_vals) == 3\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n ans = np.zeros(tuple(ish)).astype(float32)\r\n #output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]])\r\n output_grad = input_vals[2].astype(float32)\r\n osh = output_grad.shape\r\n #print(fsh)\r\n #print(ish)\r\n assert c_kernel.conv2d_c_grad1(get_pointer(ans), ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output_grad), osh[0],osh[1],osh[2],osh[3])==0\r\n ish = list(input_vals[0].shape)\r\n #end = time.time()\r\n\r\n #print(\"conv2d_grad1\") \r\n #print(end - start) \r\n return ans[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:] \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = 
range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n #print(input[m,strides[1]*i+di,strides[2]*j+dj,:].shape)\r\n #print(filter[di,dj,:,:])\r\n \"\"\"t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\"\"\"\r\n #print(matB)\r\n #print(np.dot(matA , matB))\r\n print(np.array(output_grad[m,i,j]))\r\n print(np.array(np.array(filter[di,dj,:,:].T)))\r\n ans[m,strides[1]*i+di,strides[2]*j+dj,:]+= np.dot(np.array(output_grad[m,i,j].reshape((1,-1))),np.array(filter[di,dj,:,:].T)).reshape((-1,));\r\n \"\"\"output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n \"\"\"\r\n #output += t\r\n ish = list(input_vals[0].shape)\r\n \r\n return ans[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]'''", "def convolve_one_image(self,input4D, one_image, image_shape, \n Pstruct, filter_shape,\n image_index,\n channel_index): \n \n \n ## We look at the composition for the first channel in the beginning \n rank = Pstruct[0]['U1'].shape[1]\n fwidth = filter_shape[2]\n fheight = filter_shape[3]\n \n \n # Construct horizontal filters\n #TODO save the filters in the correct shape\n horizontal_filter_shape = (rank, 1, fwidth)\n horizontal_filters = np.ndarray(horizontal_filter_shape)\n horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']);\n \n # Output is 1 x rank x W x H\n horizontal_conv_out = conv.conv2d(input=one_image, \n filters = horizontal_filters,\n filter_shape = horizontal_filter_shape, \n image_shape = image_shape)\n \n # Construct vertical filters\n vertical_filter_shape = (rank, fheight, 1)\n vertical_filters = np.ndarray(vertical_filter_shape) \n vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);\n\n initial_n_rows = image_shape[1]\n final_n_rows = initial_n_rows- fwidth + 1\n final_n_cols = image_shape[2] - fheight + 1 \n conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))\n for r in range(rank):\n # temp is 1x1x imageW x imageH\n A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], \n filters = vertical_filters[r,:,:],\n filter_shape = (1, fheight, 1), \n image_shape = (1, initial_n_rows, final_n_cols))\n conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])\n \n nbr_filters = Pstruct[0]['U3'].shape[0]\n # Final number of rows and columns \n ## numberof images, number of filters, image width, image height\n alphas = Pstruct[channel_index]['U3'] \n for f in range(nbr_filters): \n temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))\n for r in range(rank):\n temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; \n input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)\n return input4D", "def img_conv(X, filter):\n assert filter.shape[0] % 2 == 1\n assert filter.shape[1] % 2 == 1\n x_size = filter.shape[0] // 2\n y_size = filter.shape[1] // 2\n w = X.shape[0]\n h = X.shape[1]\n out = numpy.zeros(X.shape)\n for r in range(w):\n for c in range(h):\n for x in range(filter.shape[0]):\n pixel_x = r + x - x_size\n if pixel_x < 0:\n pixel_x = -pixel_x\n if pixel_x >= w:\n pixel_x = w - pixel_x - 2\n for y in range(filter.shape[1]):\n pixel_y = c + y - y_size\n if pixel_y < 0:\n pixel_y = -pixel_y\n if pixel_y >= h:\n pixel_y = h - pixel_y - 2\n #if pixel_x >= 0 and pixel_x < w and pixel_y >= 0 and pixel_y < h:\n out[r, c] += filter[x, y] * X[pixel_x, pixel_y]\n return out", "def compute(self, node, input_vals):\r\n #start = 
time.time()\r\n ans = np.zeros(input_vals[1].shape).astype(float32)\r\n #assert len(input_vals) == 3\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n output_grad = input_vals[2].astype(float32)\r\n osh = output_grad.shape\r\n assert c_kernel.conv2d_c_grad2(get_pointer(input), ish[0],ish[1],ish[2],ish[3],get_pointer(ans),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output_grad), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d_grad2\") \r\n #end = time.time()\r\n \r\n #print(end - start) \r\n return ans\r\n \r\n '''rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n \"\"\"t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\"\"\"\r\n #print(input[m,strides[1]*i+di,strides[2]*j+dj,:].shape)\r\n #print(output_grad[m,i,j].shape)\r\n ans[di,dj,:,:] += np.dot(input[m,strides[1]*i+di,strides[2]*j+dj,:].reshape((-1,1)), output_grad[m,i,j].reshape((1,-1)))\r\n \"\"\"output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\"\"\"\r\n return ans'''", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def convolve_spikes_2d(spikes_a,spikes_b,kernel_a,kernel_b):\n output = np.zeros((spikes_a.shape[0]+kernel_a.shape[0],kernel_a.shape[1]*kernel_b.shape[1]))\n for k_i in range(kernel_a.shape[1]):\n for k_j in range(kernel_b.shape[1]):\n mat = np.zeros((kernel_a.shape[0],kernel_b.shape[0]))\n #for l_1 in range(kernel_a.shape[0]):\n # for l_2 in range(kernel_b.shape[0]):\n # mat[l_1,l_2] = kernel_a[l_1,k_i] * kernel_b[l_2,k_j]\n for i in np.where(spikes_a)[0]:\n for j in np.where(spikes_b[(i+1):(i+1+kernel_b.shape[0])])[0]:\n if j < kernel_b.shape[0]:\n output[(i+j):(i+j+kernel_a.shape[0]),k_i * kernel_b.shape[1] + k_j] = output[(i+j):(i+j+kernel_a.shape[0]),k_i * kernel_b.shape[1] + k_j] + kernel_a[:,k_i] * kernel_b[j,k_j] #mat[:,j-i]\n return output[:spikes_a.shape[0],:]", "def convolution(img, kernel, padding='fill'):\n kernel = np.rot90(kernel, 2)\n h,w = kernel.shape[:2]\n t,b,l,r = (h-1)//2, h//2, (w-1)//2, w//2 # Use numpy padding because it works for >2d\n padshape = [(t,b),(l,r)]+[(0,0)]*(len(img.shape[2:]))\n padded_img = np.pad(img, padshape, mode={'fill':'constant','replicate':'edge'}[padding])\n conved_img = np.zeros_like(img)\n for i in 
1+np.arange(-h//2,h//2):\n for j in 1+np.arange(-w//2,w//2):\n if kernel[t+i,l+j]==0: continue\n conved_img += kernel[t+i,l+j]*padded_img[t+i:-b+i or None,l+j:-r+j or None]\n return conved_img", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def loop_conv(X, W):\n # Go over all five dimensions \n # (#batches x #channels x #height x #width x #dur/length )\n # with filter that has\n # #filters x #channels x #height x #width x #dur/length \n num_filters = W.shape[0]\n filt_channels = W.shape[1]\n filt_height = W.shape[2]\n filt_width = W.shape[3]\n filt_duration = W.shape[4]\n num_batches = X.shape[0]\n input_channels = X.shape[1]\n assert(filt_channels == input_channels)\n out_shape = compute_out_shape(X.shape, W.shape)\n out_height = out_shape[2]\n out_width = out_shape[3]\n out_duration = out_shape[4]\n \n # The output is H :)\n H = np.zeros((out_shape))\n for batch_i in xrange(0, num_batches):\n for filt_i in xrange(0, num_filters):\n for out_x in xrange(0, out_height):\n for out_y in xrange(0, out_width):\n for out_z in xrange(0, out_duration):\n for chan_i in xrange(0, filt_channels):\n for filt_x in xrange(0, filt_height):\n for filt_y in xrange(0, filt_width):\n for filt_z in xrange(0, filt_duration):\n weight = W[filt_i, chan_i, filt_x, filt_y, filt_z]\n input_val = X[batch_i, chan_i, \\\n out_x + filt_x, out_y + filt_y, out_z + filt_z]\n H[batch_i, filt_i, out_x, out_y, out_z] += \\\n weight * input_val\n return H", "def conv2D(null,channels,X,stride,kernel_shape,padding = False,initialize_weights = True,*args):\n # filters = dimensionality of output space\n # If padding is enabled, we pad the input with zeros such that the input size\n # remains the same if weights with stride 1 are applied to the input\n if initialize_weights:\n kernel = np.random.normal(size = (kernel_shape[0],kernel_shape[1],kernel_shape[2]))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n kernel = torch.FloatTensor(kernel)\n kernel.requires_grad = False\n else:\n kernel = args[0] # weights and bias must be given if initialise weights is disabled\n bias = args[1]\n kernel_shape = kernel.shape\n \n X = X.detach().numpy()\n if padding: # Can only pad during initialization -> weights and input shapes cannot change during feedforward and backpropagation\n if kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 != 0 and kernel_shape[2] % 2 == 0:\n X = 
np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 != 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n else:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n \n X = torch.FloatTensor(X)\n \n img_shape = X.shape\n \n output_size1 = math.floor((img_shape[1] - kernel_shape[1])/(stride)) + 1\n output_size2 = math.floor((img_shape[2] - kernel_shape[2])/(stride)) + 1\n output_shape = [channels,output_size1,output_size2]\n \n X_im2col,im = im2col(X,kernel,stride)\n \n \n if initialize_weights:\n weight = torch.reshape(kernel,(kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))\n # weight consists of only one weight vector. But the dimensionality of output space has to be\n # num_filters. So we need to stack weight vectors horizontally and create num_filters number of\n # feature maps\n for i in range(channels-1):\n weight2 = np.random.normal(size = (kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n weight2 = torch.FloatTensor(weight2)\n weight2.requires_grad = False\n weight = torch.cat((weight2, weight),1) # do this num_filters - 1 number of times\n conv_output = torch.t(X_im2col).mm(weight)\n bias = torch.Tensor(np.random.normal(size = conv_output.shape))\n conv_output += bias\n conv_output = torch.reshape(conv_output,(output_shape))\n return torch.nn.Parameter(conv_output), torch.nn.Parameter(weight),X_im2col,im, output_shape,bias\n else:\n # Since weights are already initialised, the relevant channels are already dictated in the architecture.\n # Therefore, conv output is just a matmul\n conv_output = torch.t(X_im2col).mm(kernel) + bias\n return torch.nn.Parameter(conv_output),X_im2col", "def convolution(image, kernel):\n\n #Se encuentra la dimencion de la imagen\n if len(image.shape) == 3: #De 3 dimenciones\n print(\"Dimenciones de imagen: {}\".format(image.shape))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Se cambia a dos dimenciones\n print(\"Nuevas dimenciones: {}\".format(image.shape))\n else:\n print(\"Dimenciones de imagen: {}\".format(image.shape))\n\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n kernel_row, kernel_col = kernel.shape #asigna alto y ancho del filtro\n\n output_x = (image_col - (kernel_col / 2) * 2) + 1 #asigna el ancho del output\n output_y = (image_row - (kernel_row / 2) * 2) + 1 #asigna el alto del output\n \n output = np.zeros([int(output_y), int(output_x)]) #matriz donde se guarda el resultado\n\n padded_size = int((kernel_row - 1) / 2) #Tamaño de padding\n\n #Obtenemos la imagen con padding\n padded_image = padding(image,padded_size)\n \n for row in range(int(output_y)):\n for col in range(int(output_x)):\n output[row, col] = conv_helper(\n padded_image[row:row + kernel_row, \n col:col + kernel_col], kernel)\n \n # Se muestra la imagen en pantalla\n plt.imshow(output, cmap='gray')\n plt.title(\"Edge detection\")\n plt.show()\n\n return output", "def convolve_grayscale_same(images, kernel):\n imgshape = images.shape\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n # conved = np.zeros((imgshape[0], h - kh + 1, w - kw + 1))\n 
conved = np.zeros(imgshape)\n ph = int((kh) / 2)\n pw = int((kw) / 2)\n # print(conved.shape)\n # print(kernel.shape, images.shape)\n # print(kernel[None, :, :].shape)\n padimg = np.pad(images, ((0, 0), (ph, ph), (pw, pw)), 'constant',\n constant_values=0)\n for i in range(0, h):\n for j in range(0, w):\n subs = padimg[:, i:i + kh, j:j + kw]\n # ip = i + ph\n # jp = j + pw\n conved[:, i, j] = np.sum((kernel[None, :, :] * subs),\n axis=(1, 2))\n\n return conved", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def convolution(image: np.array, kernel: np.array) -> np.array:\n\n # default condition: apply SAME padding, and keep stride at 1\n stride_x = 1\n stride_y = 1\n padding_y = int(len(kernel - 1) / 2)\n padding_x = int(len((kernel[0]) - 1) / 2)\n # create the return array with with the same dimensions as <image>,\n # and then create a padded image\n convolved_image = np.zeros((len(image), len(image[0])))\n padded_image = np.zeros((len(image) + 2 * padding_y,\n len(image[0]) + 2 * padding_x))\n padded_image[padding_x: -padding_x, padding_y: -padding_y] = image\n\n for py in range(0, len(padded_image) - len(kernel), stride_y):\n for px in range(0, len(padded_image[0]) - len(kernel[0]), stride_x):\n # scan the matrix over columns in image array, then shift the matrix\n # down, and repeat\n padded_image_section = padded_image[py: py + len(kernel[0]),\n px: px + len(kernel)]\n # print(padded_image_section)\n convolved_image[py, px] = int(np.tensordot(padded_image_section,\n kernel))\n\n return convolved_image", "def _cconvolve(x, H, nfft, wlen, axis):\n \n # pad with wlen-1 zeros for overlap & FFT\n x = pad_along_axis(x, 
[0, wlen - 1], axis=axis)\n xf = np.fft.rfft(x, nfft, axis=axis)\n \n # take product with window in freq. domain\n product = multiply_along_axis(xf, H, axis=axis)\n\n # back transform to sample domain and return\n return np.fft.irfft(product, axis=axis).real", "def convolve_grayscale_valid(images, kernel):\n imgshape = images.shape\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n conved = np.zeros((imgshape[0], h - kh + 1, w - kw + 1))\n\n for i in range(0, h - kh + 1):\n for j in range(0, w - kw + 1):\n subs = images[:, i:i + kh, j:j + kw]\n conved[:, i, j] = np.sum((kernel[None, :, :] * subs),\n axis=(1, 2))\n\n return conved", "def convolution(matrix, kernel):\n assert assert_odd(kernel.shape[0])\n \n # Padded matrix (0s on the outsides)\n N = kernel.shape[0] # Get the dim for the kernel\n I = np.pad(matrix, int(N/2), \"constant\")\n \n # Now do the convolution\n C = np.zeros(matrix.shape) # This is the convolved image\n h, w = C.shape # Get width and height\n s = int(N/2) # Spacing of the matrix\n positions = [(i,j) for i in range(h) for j in range(w)]\n for (i,j) in positions:\n y, x = i+s,j+s # Shift the center to the right position\n \n # Calc the convolution at each pixel\n C[i,j] = np.sum(np.multiply(kernel, I[y-s:y+s+1,x-s:x+s+1]))\n \n # Return the clipped array as uint8\n return C", "def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')", "def linconv(nx):", "def convolution(img, kernel, padding=True):\n result = np.zeros_like(img)\n p_size_i = kernel.shape[0] // 2\n p_size_j = kernel.shape[1] // 2\n\n if padding:\n padded_img = np.zeros((img.shape[0] + 2 * p_size_i, img.shape[1] + 2 * p_size_j))\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n padded_img[i_first: i_last + 1, j_first: j_last + 1] = img\n else:\n padded_img = img.copy()\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n \n for i in range(i_first, i_last):\n for j in range(j_first, j_last):\n window = padded_img[i - p_size_i: i + p_size_i + 1, j - p_size_j: j + p_size_j + 1]\n res_pix = np.sum(window * kernel)\n result[i - p_size_i, j - p_size_j] = res_pix\n return result", "def fftconvolve(array, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpynp.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x * y))))", "def discreteConvolution2D( iImage, iKernel ): \n # pretvori vhodne spremenljivke v np polje in\n # inicializiraj izhodno np polje\n iImage = np.asarray( iImage )\n iKernel = np.asarray( iKernel )\n #------------------------------- za hitrost delovanja\n oImage = ni.convolve( iImage, iKernel, mode='nearest' ) \n return oImage", "def conv_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n k = np.flip(np.flip(kernel, 1), 0)\n padding_image = zero_pad(image, Hk//2, Wk//2)\n for i in range(Hi):\n for j in range(Wi):\n out[i, j] = np.sum(np.multiply(padding_image[i:i+Hk, j:j+Wk], k))\n\n return out", "def conv2d_forward(x, w, b, pad, stride):\n #raise NotImplementedError\n \n\n \n #######################################################################\n # #\n # #\n # 
def conv2d_forward(x, w, b, pad, stride):
    #######################################################################
    #                        TODO: YOUR CODE HERE                         #
    #######################################################################
    ba, h, wd, c = x.shape
    f, fh, fw, c = w.shape
    n_h = ((h - fh + 2 * pad) // stride) + 1
    n_w = ((wd - fw + 2 * pad) // stride) + 1
    # pad only the two spatial axes, not the batch or channel axis
    x_paded = np.pad(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant')
    out = np.zeros((ba, n_h, n_w, f))
    for m in range(0, ba):
        for i in range(0, n_h):
            for j in range(0, n_w):
                for n in range(0, f):
                    h_t = i * stride
                    h_t2 = i * stride + fh
                    w_t = j * stride
                    w_t2 = j * stride + fw
                    temp = x_paded[m, h_t:h_t2, w_t:w_t2, :]
                    out[m, i, j, n] = np.sum(temp * w[n, :, :, :]) + b[n]

    return out

def convolve(images, kernels, padding='same', stride=(1, 1)):
    m = images.shape[0]
    h = images.shape[1]
    w = images.shape[2]
    c = images.shape[3]
    kh = kernels.shape[0]
    kw = kernels.shape[1]
    nc = kernels.shape[3]
    sh = stride[0]
    sw = stride[1]

    if padding == 'same':
        ph = max((h - 1) * sh + kh - h, 0)
        pt = int(np.ceil(ph / 2))
        pb = pt
        pw = max((w - 1) * sw + kw - w, 0)
        pl = int(np.ceil(pw / 2))
        pr = pl
    elif padding == 'valid':
        pt, pb, pl, pr = 0, 0, 0, 0
    else:
        pt, pb = padding[0], padding[0]
        pl, pr = padding[1], padding[1]

    oh = ((h - kh + pt + pb) // sh) + 1
    ow = ((w - kw + pl + pr) // sw) + 1

    images = np.pad(images, pad_width=((0, 0), (pt, pb), (pl, pr), (0, 0)),
                    mode='constant', constant_values=0)

    conv = np.zeros((m, oh, ow, nc))
    for k in range(nc):
        for i in range(oh):
            for j in range(ow):
                aux = images[:, i * sh:i * sh + kh, j * sw:j * sw + kw] \
                    * kernels[:, :, :, k]
                conv[:, i, j, k] = np.sum(aux, axis=(1, 2, 3))
    return conv

def my1_conv2d(image, kernels, strides=(1, 1)):
    raise NotImplementedError('Write me!')
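# --- Editor's sketch (added): the output-size rule used by the forward passes
# above, as a standalone helper. Frameworks floor the division, so a stride
# that does not tile the input exactly simply drops the trailing rows/columns.
def conv_output_size(in_size, field, pad, stride):
    return (in_size - field + 2 * pad) // stride + 1

# e.g. a 224-wide input with a 7-wide filter, pad 3, stride 2 gives
# (224 - 7 + 6) // 2 + 1 == 112
assert conv_output_size(224, 7, 3, 2) == 112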
def dense_conv_forward_2d_fast(
        inp_image: np.ndarray,
        filter: np.ndarray,
        output: np.ndarray,
        strides,
        padding):
    INPUT_DIMENSIONS = 4
    HEIGHT_IDX = 2           # H
    WIDTH_IDX = 3            # W
    INPUT_CHANNELS_IDX = 1   # C
    OUTPUT_CHANNELS_IDX = 0  # K
    NUM_IMAGES_IDX = 0       # N

    x_shape = inp_image.shape
    f_shape = filter.shape
    o_shape = output.shape

    if len(x_shape) != INPUT_DIMENSIONS or len(f_shape) != INPUT_DIMENSIONS or len(o_shape) != INPUT_DIMENSIONS:
        raise RuntimeError("conv2d: input, filter, and output must all have four dimensions.")

    # note: `assert (cond, "msg")` tests a non-empty tuple, which is always
    # true; the condition and message must not be wrapped in parentheses
    assert x_shape[HEIGHT_IDX] % strides[1] == 0, "Input height is not evenly divisible by stride size."
    assert x_shape[WIDTH_IDX] % strides[0] == 0, "Input width is not evenly divisible by stride size."
    assert x_shape[INPUT_CHANNELS_IDX] == f_shape[INPUT_CHANNELS_IDX], \
        "Number of channels in input does not match number channels expected by convolution."
    assert o_shape[OUTPUT_CHANNELS_IDX] == f_shape[OUTPUT_CHANNELS_IDX], \
        "Number of channels in output does not match number channels expected by convolution."

    N = x_shape[NUM_IMAGES_IDX]
    H = x_shape[HEIGHT_IDX]
    W = x_shape[WIDTH_IDX]
    K = f_shape[OUTPUT_CHANNELS_IDX]
    C = f_shape[INPUT_CHANNELS_IDX]
    R = f_shape[HEIGHT_IDX]
    S = f_shape[WIDTH_IDX]
    P = (x_shape[HEIGHT_IDX] - f_shape[HEIGHT_IDX]) // strides[1] + 1  # output height
    Q = (x_shape[WIDTH_IDX] - f_shape[WIDTH_IDX]) // strides[0] + 1   # output width

    assert o_shape[HEIGHT_IDX] == P, f"Output height should be {P}."
    assert o_shape[WIDTH_IDX] == Q, f"Output width should be {Q}."

    for i in range(0, (H - R) + 1, strides[1]):
        y = int(i / strides[1])
        for j in range(0, (W - S) + 1, strides[0]):
            x = int(j / strides[0])
            inp_view = inp_image[:, :, i:i + R, j:j + S]
            for k in range(0, K):
                f_slice = filter[k, :, :, :]
                prod = np.sum(inp_view * f_slice, (INPUT_CHANNELS_IDX, HEIGHT_IDX, WIDTH_IDX))
                output[:, k, y, x] = prod

def askapsoft_decimate_n_extract(af, over_sampling, kernel_support):

    # why is this normalization required..?
    rescale = over_sampling * over_sampling

    cSize = 2 * kernel_support + 1
    itsConvFunc = np.zeros((over_sampling, over_sampling, cSize, cSize), dtype=complex)

    for fracu in range(0, over_sampling):
        for fracv in range(0, over_sampling):

            # Now cut out the inner part of the convolution function and
            # insert it into the convolution function
            for iy in range(-kernel_support, kernel_support + 1):
                for ix in range(-kernel_support, kernel_support + 1):

                    nx = af.shape[0]
                    ny = af.shape[1]

                    # assumes support is the same for all w-planes:
                    xval = ix * over_sampling + fracu + nx // 2
                    yval = iy * over_sampling + fracv + ny // 2

                    itsConvFunc[fracu, fracv, ix + cSize // 2, iy + cSize // 2] \
                        = rescale * af[xval, yval]

    return itsConvFunc[::-1, ::-1]

def conv4d(data, filters, bias=None, permute_filters=True, use_half=False):
    b, c, h, w, d, t = data.size()

    data = data.permute(2, 0, 1, 3, 4, 5).contiguous()  # permute to avoid making contiguous inside loop

    # Same permutation is done with filters, unless already provided with permutation
    if permute_filters:
        filters = filters.permute(2, 0, 1, 3, 4, 5).contiguous()  # permute to avoid making contiguous inside loop

    c_out = filters.size(1)
    if use_half:
        output = Variable(torch.HalfTensor(h, b, c_out, w, d, t), requires_grad=data.requires_grad)
    else:
        output = Variable(torch.zeros(h, b, c_out, w, d, t), requires_grad=data.requires_grad)

    padding = filters.size(0) // 2
    if use_half:
        Z = Variable(torch.zeros(padding, b, c, w, d, t).half())
    else:
        Z = Variable(torch.zeros(padding, b, c, w, d, t))

    if data.is_cuda:
        Z = Z.cuda(data.get_device())
        output = output.cuda(data.get_device())

    data_padded = torch.cat((Z, data, Z), 0)

    for i in range(output.size(0)):  # loop on first feature dimension
        # convolve with center channel of filter (at position=padding)
        output[i, :, :, :, :, :] = F.conv3d(data_padded[i + padding, :, :, :, :, :],
                                            filters[padding, :, :, :, :, :], bias=bias, stride=1, padding=padding)
        # convolve with upper/lower channels of filter (at positions [:padding] [padding+1:])
        for p in range(1, padding + 1):
            output[i, :, :, :, :, :] = output[i, :, :, :, :, :] + F.conv3d(data_padded[i + padding - p, :, :, :, :, :],
                                                                           filters[padding - p, :, :, :, :, :], bias=None, stride=1, padding=padding)
            output[i, :, :, :, :, :] = output[i, :, :, :, :, :] + F.conv3d(data_padded[i + padding + p, :, :, :, :, :],
                                                                           filters[padding + p, :, :, :, :, :], bias=None, stride=1, padding=padding)

    output = output.permute(1, 2, 0, 3, 4, 5).contiguous()
    return output

def py_conv(img, kern, mode, subsample):
    if imported_scipy_convolve2d:
        return py_conv_scipy(img, kern, mode, subsample)
    elif mode == 'valid':
        return py_conv_valid_numpy(img, kern)[:, :, ::subsample[0],
                                              ::subsample[1]]
    elif mode == 'full':
        return py_conv_full_numpy(img, kern)[:, :, ::subsample[0],
                                             ::subsample[1]]
    else:
        raise Exception("Can't execute this kernel.")
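# --- Editor's sketch (added): the im2col + matmul trick that the explicit
# loops above avoid; stride 1, 'valid' padding, single channel, illustration
# only. sliding_window_view needs numpy >= 1.20.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def conv2d_im2col(img, kernel):
    kh, kw = kernel.shape
    patches = sliding_window_view(img, (kh, kw))        # (oh, ow, kh, kw)
    oh, ow = patches.shape[:2]
    cols = patches.reshape(oh * ow, kh * kw)            # the im2col matrix
    return (cols @ kernel.reshape(-1)).reshape(oh, ow)  # one matrix product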
def conv2d(input, filters, image_shape=None, filter_shape=None,
           border_mode='valid', subsample=(1, 1), **kargs):

    # accept Constant value for image_shape and filter_shape.
    if image_shape is not None:
        image_shape = list(image_shape)
        for i in xrange(len(image_shape)):
            if image_shape[i] is not None:
                try:
                    image_shape[i] = get_scalar_constant_value(
                        as_tensor_variable(image_shape[i]))
                except NotScalarConstantError, e:
                    raise NotScalarConstantError(
                        "The convolution need that the shape"
                        " information are constant values. We got"
                        " %s for the image_shape parameter" %
                        image_shape[i])
                assert str(image_shape[i].dtype).startswith('int')
                image_shape[i] = int(image_shape[i])
    if filter_shape is not None:
        filter_shape = list(filter_shape)
        for i in xrange(len(filter_shape)):
            if filter_shape[i] is not None:
                try:
                    filter_shape[i] = get_scalar_constant_value(
                        as_tensor_variable(filter_shape[i]))
                except NotScalarConstantError, e:
                    raise NotScalarConstantError(
                        "The convolution need that the shape"
                        " information are constant values. 
We got\"\r\n \" %s for the filter_shape \"\r\n \"parameter\" % filter_shape[i])\r\n assert str(filter_shape[i].dtype).startswith('int')\r\n filter_shape[i] = int(filter_shape[i])\r\n\r\n if image_shape and filter_shape:\r\n try:\r\n assert image_shape[1] == filter_shape[1]\r\n except Exception:\r\n print 'image ', image_shape, ' filters ', filter_shape\r\n raise\r\n\r\n if filter_shape is not None:\r\n nkern = filter_shape[0]\r\n kshp = filter_shape[2:]\r\n else:\r\n nkern, kshp = None, None\r\n\r\n if image_shape is not None:\r\n bsize = image_shape[0]\r\n imshp = image_shape[1:]\r\n else:\r\n bsize, imshp = None, None\r\n\r\n op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],\r\n imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)\r\n\r\n return op(input, filters)", "def test_positional_convolution_forward(ctx):\n # num_batch * channel * height * width input\n # i.e. (2, 2, 6, 6)\n in_data = \\\n mx.nd.array(\n [\n [[[1, 2, -1, 0, 1, 1],\n [3, 6, -5, 4, 2, -2],\n [9, 6, -1, 3, 1, 3],\n [4, 2, 5, 7, 3, 1],\n [0, 1, 1, 2, 2, 1],\n [3, 1, 2, 4, 3, 3]],\n\n [[3, 1, 2, 4, 3, 3],\n [0, 1, 1, 2, 2, 1],\n [4, 2, 5, 7, 3, 1],\n [9, 6, -1, 3, 1, 3],\n [3, 6, -5, 4, 2, -2],\n [1, 2, -1, 0, 1, 1]]],\n [[[1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1],\n [0, 0, 1, 1, 2, 2],\n [3, 3, 0, -1, -1, -2],\n [3, 1, 0, 3, 3, 2],\n [5, 6, 7, -1, -2, 0]],\n\n [[5, 6, 7, -1, -2, 0],\n [3, 1, 0, 3, 3, 2],\n [3, 3, 0, -1, -1, -2],\n [0, 0, 1, 1, 2, 2],\n [6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6]]]\n ], ctx=ctx)\n\n # num_filter * channel * K * K weight\n # i.e. (2, 2, 3, 3)\n weight = \\\n mx.nd.array(\n [\n [[[1, 0, 1],\n [0, 2, -1],\n [2, 3, 1]],\n\n [[1, 1, 0],\n [2, -1, 2],\n [3, -2, 4]]],\n\n [[[0, 1, 2],\n [-1, 2, 3],\n [4, 1, -5]],\n\n [[3, 0, -1],\n [-1, 2, 1],\n [5, 6, 2]]]\n ], ctx=ctx)\n\n # num_batch * channel * out_height * out_width scale\n # i.e. (2, 2, 6, 6)\n scale = \\\n mx.nd.array(\n [\n [[[1, 1, 1, 1, 1, 1],\n [1, -1, 1, -1, 1, -1],\n [-1, 1, -1, 1, -1, 1],\n [-1, -1, -1, -1, -1, -1],\n [2, 1, 2, 2, 1, 1],\n [1, 2, 1, 2, 1, 2]],\n\n [[1, 1, 1, 1, 1, 1],\n [1, -1, -1, 1, 1, 1],\n [-1, 1, -1, 1, -1, 1],\n [1, -1, -1, -1, -1, 1],\n [2, -1, 2, -2, 1, 1],\n [1, 2, 1, 2, 1, 2]]],\n\n [[[6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6],\n [1, -1, 2, -2, 3, -3],\n [4, -4, 5, -5, 6, -6],\n [1, 1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1, -1]],\n\n [[-1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, 1],\n [4, -4, 5, -5, 6, -6],\n [1, -1, 2, -2, 3, -3],\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1]]],\n ], ctx=ctx)\n\n # num_filter bias\n # i.e. 
(2, )\n bias = \\\n mx.nd.array(\n [1, 2], ctx=ctx)\n\n in_data_var = mx.symbol.Variable(name=\"in_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n scale_var = mx.symbol.Variable(name=\"scale\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n\n op = mx.symbol.contrib.PositionalConvolution(name='test_positional_convolution',\n data=in_data_var,\n scale=scale_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=2,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n be = op.bind(ctx=ctx, args={'in_data': in_data,\n 'scale': scale,\n 'weight': weight,\n 'bias': bias})\n be.forward(True)\n out_o = be.outputs[0].asnumpy()\n print(out_o)", "def convolve(self, *args, **kwargs):\n return _image.image_convolve(self, *args, **kwargs)", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m = images.shape[0]\n image_h = images.shape[1]\n image_w = images.shape[2]\n filter_h = kernel.shape[0]\n filter_w = kernel.shape[1]\n s1 = stride[0]\n s2 = stride[1]\n\n if padding == 'valid':\n pad_h = 0\n pad_w = 0\n\n if padding == 'same':\n pad_h = int(((image_h - 1) * s1 + filter_h - image_h) / 2) + 1\n pad_w = int(((image_w - 1) * s2 + filter_w - image_w) / 2) + 1\n\n if type(padding) == tuple:\n pad_h = padding[0]\n pad_w = padding[1]\n\n n_dim1 = int((image_h + 2 * pad_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w + 2 * pad_w - filter_w) / stride[1]) + 1\n convolve = np.zeros((m, n_dim1, n_dim2))\n new_images = np.pad(images, ((0, 0), (pad_h, pad_h), (pad_w, pad_w),\n (0, 0)), mode='constant')\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n values = np.sum(mini_matrix * kernel,\n axis=1).sum(axis=1).sum(axis=1)\n convolve[:, x, y] = values\n return (convolve)", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def convolve_spikes(spikes,kernel):\n output = np.zeros((spikes.shape[0]+kernel.shape[0]+1,kernel.shape[1]))\n for i in np.where(spikes)[0]:\n output[(i+1):(i+1+kernel.shape[0]),:] = output[(i+1):(i+1+kernel.shape[0]),:] + kernel\n return output[:len(spikes),:]", "def _valid_convolve(images: th.Tensor, kernels: th.Tensor) -> th.Tensor:\n ret = F.conv2d(images.view((images.shape[0], *images.shape[-3:])).transpose(1, 0),\n th.flip(kernels.view((kernels.shape[0], *kernels.shape[-3:])), dims=(-1, -2)),\n groups=kernels.shape[0]).transpose(1, 0)\n return ret", "def PrimaryCap(inputs, dim_vector, n_channels, kernel_size, strides, padding):\n output = layers.Conv1D(filters=dim_vector * n_channels, kernel_size=kernel_size, strides=strides, padding=padding)(\n inputs)\n \n #output = Activation(activation=\"relu\")(output)\n outputs = layers.Reshape(target_shape=[-1, dim_vector])(output)\n return layers.Lambda(squash)(outputs)", "def convolve_seq(a, b):\n out = []\n a_dims = a.shape\n b = as_strided_seq(b, 5, 1)\n\n for ctr in range(a_dims[1]):\n if isinstance(a, np.ndarray):\n temp = a[0][ctr]\n tt = npo.flipud(temp)\n tt = npo.fliplr(tt)\n a[0][ctr] = tt\n else:\n a = a._value\n temp = a[0][ctr]\n tt = npo.flipud(temp)\n tt = npo.fliplr(tt)\n a[0][ctr] = tt\n\n if not isinstance(b[0][0][0][0][0][0], np.float):\n s = b.shape\n temp_b = np.empty(s)\n for i in range(s[0]):\n for j in range(s[1]):\n for k in range(s[2]):\n for l in range(s[3]):\n for m in range(s[4]):\n for n in range(s[5]):\n try:\n val = b[i][j][k][l][m][n]._value\n temp_b[i][j][k][l][m][n] = val\n except:\n val = b[i][j][k][l][m][n]\n 
temp_b[i][j][k][l][m][n] = val

    b_dims = b.shape
    ex_ct = b_dims[0]
    for ctr in range(ex_ct):
        filters = []
        for i in range(a_dims[1]):
            arr = []
            for j in range(b_dims[1]):
                row = []
                for k in range(b_dims[2]):
                    filter = a[:, i, :, :]
                    patch = b[ctr, j, k, :, :, :]
                    try:
                        temp = npo.einsum('ijk,ijk->', filter, patch)
                    except:
                        patch = temp_b[ctr, j, k, :, :, :]
                        temp = npo.einsum('ijk,ijk->', filter, patch)
                    row.append(temp)
                if len(arr) == 0:
                    arr = npo.array([row])
                    row = []
                else:
                    arr = npo.vstack((arr, [row]))
                    row = []
            if len(filters) == 0:
                filters = npo.array([arr])
                arr = []
            else:
                filters = npo.vstack((filters, [arr]))
                arr = []
        if len(out) == 0:
            out = npo.array([filters])
            filters = []
        else:
            out = npo.vstack((out, [filters]))
            filters = []
    return out

def convolve_grayscale_same(images, kernel):
    m = images.shape[0]
    h = images.shape[1]
    w = images.shape[2]
    kh = kernel.shape[0]
    kw = kernel.shape[1]
    padh = int(kh / 2)
    padw = int(kw / 2)
    pad = ((0, 0), (padh, padh), (padw, padw))
    conv = np.zeros([m, h, w])
    imagePad = np.pad(images, pad_width=pad, mode='constant')
    for i in range(h):
        for j in range(w):
            image = imagePad[:, i:i+kh, j:j+kw]
            conv[:, i, j] = np.multiply(image, kernel).sum(axis=(1, 2))
    return conv

def clPooling(self, size, stride=(1, 1), mask=(3, 3), maxPool=True):

def moffat_convolution(im_array, n_fwhm, beta, fwhm):

    r_s = fwhm/(2. * math.sqrt(2.**(1./beta) - 1.))

    im_kernel_array = moffat_kernel(n_fwhm, beta, r_s)
    conv_image = signal.convolve(im_array, im_kernel_array, mode='same')

    return conv_image
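# --- Editor's sketch (added): a normalized 2-D Moffat profile of the kind the
# moffat_kernel(...) helper above presumably returns; the grid half-width and
# the name are assumptions, the profile (1 + (r/r_s)**2)**(-beta) is standard.
import numpy as np

def moffat_kernel_sketch(n_fwhm, beta, r_s):
    half = int(round(n_fwhm * r_s))  # assumed: window scales with r_s
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    k = (1.0 + (x * x + y * y) / r_s**2) ** (-beta)
    return k / k.sum()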
def conv_backward_naive(dout, cache):
    dx, dw, db = None, None, None
    #############################################################################
    # TODO: Implement the convolutional backward pass.                          #
    #############################################################################
    x, w, b, conv_param = cache
    N, C, H, W = x.shape
    F, C, HH, WW = w.shape
    N, F, Hc, Wc = dout.shape
    stride = conv_param['stride']

    xp = np.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0)

    db = np.array([np.sum(dout[:, i, :, :]) for i in xrange(F)])
    dw = np.zeros((F, C, HH, WW))
    for f in xrange(F):
        for c in xrange(C):
            for hh in xrange(HH):
                for ww in xrange(WW):
                    dw[f, c, hh, ww] = np.sum(dout[:, f, :, :] * xp[:, c, hh:H+hh:stride, ww:W+ww:stride])

    dx = np.zeros(x.shape)
    dx = np.pad(dx, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0)
    for i in xrange(N):
        for hh in xrange(HH):
            for ww in xrange(WW):
                whw = w[:, :, hh, ww].T
                for hc in xrange(Hc):
                    for wc in xrange(Wc):
                        he = hc * stride + hh
                        wi = wc * stride + ww
                        dx[i, :, he, wi] += np.sum(whw * dout[i, :, hc, wc], axis=1)

    dx = dx[:, :, 1:-1, 1:-1]
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return dx, dw, db

def _conv_block(inputs, filters, kernel, strides, nl):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)
    x = BatchNormalization(axis=channel_axis)(x)
    return _return_activation(x, nl)

def conv1d(data_arr, kernel_arr, tarr_len, discrete_kernel_shape, mode='valid'):

    assert(data_arr.ndim == 2)
    output_shape = discrete_kernel_shape[1:]
    if (kernel_arr.ndim == 2):
        # Algorithm assumes a "to" axis on the kernel. 
Add it.\n kernel_arr = add_axes(kernel_arr, 1, 'before last')\n discrete_kernel_shape = discrete_kernel_shape[0:1] + (1,) + discrete_kernel_shape[1:2]\n else:\n check(kernel_arr.ndim == 3)\n\n # Convolutions leave the time component on the inside, but we want it on the outside\n # So we do the iterations in reverse order, and flip the result with transpose()\n # The result is indexed as [tidx][to idx][from idx]\n if cf.use_theano:\n # We use slices from_idx:from_idx+1 because conv2d expects 2D objects\n # We then index [:,0] to remove the spurious dimension\n result = T.stack(\n [ T.stack(\n [ T.signal.conv.conv2d(data_arr[:, from_idx:from_idx+1 ],\n kernel_arr[:, to_idx, from_idx:from_idx+1 ],\n image_shape = (tarr_len, 1),\n filter_shape = (discrete_kernel_shape[0], 1),\n border_mode = mode)[:,0]\n for to_idx in np.arange(discrete_kernel_shape[1]) ] )\n for from_idx in np.arange(discrete_kernel_shape[2]) ] ).T\n else:\n assert(discrete_kernel_shape == kernel_arr.shape)\n assert(tarr_len == data_arr.shape[0])\n result = np.stack(\n [ np.stack(\n [ scipy.signal.convolve(data_arr[:, from_idx ],\n kernel_arr[:, to_idx, from_idx ],\n mode=mode)\n for to_idx in np.arange(kernel_arr.shape[1]) ] )\n for from_idx in np.arange(kernel_arr.shape[2]) ] ).T\n\n return result.reshape((tarr_len - discrete_kernel_shape[0] + 1,) + output_shape)", "def conv(h,x):\n\n final_conv_dim=(512,512) ## dimension of the convolution result before cropping\n x_dim=(x.size(2),x.size(3)) ## dimension of x\n h_dim=(h.size(2),h.size(3)) ## dimension of h\n crop_dim=x_dim ## image obtained after cropping is the same dimension as the image x\n\n padding=(final_conv_dim[0]-(x_dim[0]-h_dim[0]+1),final_conv_dim[1]-(x_dim[1]-h_dim[1]+1)) ## calculate the amount of padding required given final_conv_dim, x_dim and h_dim\n\n x_pad=F.pad(x,(padding[0]//2,padding[0]//2+1,padding[1]//2,padding[1]//2+1)) ## pad x\n y=F.conv2d(x_pad,h.flip(2,3),padding=0) ## convolve x_pad with h\n \n ## starting and ending values along the column and the rows for cropping\n starti=(final_conv_dim[0]-crop_dim[0])//2 \n endi=crop_dim[0]+starti\n startj=(final_conv_dim[1]-crop_dim[1])//2\n endj=crop_dim[1]+startj\n\n ## Cropping\n y=y[:,:,starti:endi,startj:endj]\n \n return y", "def conv_forward_naive(x, w, b, conv_param):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n ###########################################################################\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Use np.pad for zero padding of the input.\n #Save shape of input data and filters.\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n x = np.pad(x,[(0,0),(0,0),(1,1),(1,1)],mode = 'constant')\n #Convolve each filter to create the activation maps.\n '''Compute activation maps size.First dimension:number of training examples.\n Second dimension:depth is as the number of filters.\n Width and height will be computed based on the equation that we showed in the lectures.\n The equation :(W - F + 2P)/S + 1 where:\n -W:input size.\n -F:receptive field(number of filters).\n -P: padding size.\n -S: the stride that we use.\n '''\n out_width = int((W - WW + 2 * pad) / (stride) + 1)\n out_height = int((H - HH + 2 * pad) / (stride) + 1)\n out = np.zeros((N,F,out_height,out_width))\n #Compute the activation maps for each one of the N training examples.\n for t in range(N):\n curr_x = x[t,:,:,:]\n #Loop over each filter.\n for k in range(F):\n curr_filter = w[k,:,:,:]\n #Go over all valid spots in current training example.\n out_i = 0\n for i in range(0,x.shape[2] - HH + 1,stride):\n out_j = 0\n for j in range(0,x.shape[3] - WW + 1,stride):\n #Compute dot product in current spot.\n dot_product = np.sum(curr_filter * curr_x[:,i:(i + HH),j:(j + WW)])\n out[t,k,out_i,out_j] = dot_product \\\n + b[k]\n #Increment out_j\n out_j += 1\n out_i += 1\n\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b, conv_param)\n return out, cache", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, h, w, c = images.shape\n KernelHeight, kernelWidth, c = kernel.shape\n StrideHeight, StrideWidth = stride\n\n if padding == 'valid':\n PaddingHeight = 0\n PaddingWidth = 0\n elif padding == 'same':\n PaddingHeight = int(\n (((h - 1) * StrideHeight + KernelHeight - h) / 2) + 1)\n PaddingWidth = int((((w - 1) * StrideWidth + kernelWidth - w) / 2) + 1)\n else:\n PaddingHeight, PaddingWidth = padding\n\n OutputH = int(((h + 2 * PaddingHeight - KernelHeight) / StrideHeight) + 1)\n OutputW = int(((w + 2 * PaddingWidth - kernelWidth) / StrideWidth) + 1)\n\n ImagePadded = np.pad(\n images,\n ((0, 0), (PaddingHeight, PaddingHeight),\n (PaddingWidth, PaddingWidth), (0, 0)),\n 'constant'\n )\n\n output = np.zeros((m, OutputH, OutputW))\n ImageRange = np.arange(m)\n\n for i_OutputH in range(OutputH):\n for i_OutputW in range(OutputW):\n s_i_OutputH = i_OutputH * StrideHeight\n s_i_OutputW = i_OutputW * StrideWidth\n flt = ImagePadded[ImageRange,\n s_i_OutputH:KernelHeight + s_i_OutputH,\n s_i_OutputW:kernelWidth + s_i_OutputW,\n :]\n output[ImageRange, i_OutputH, i_OutputW] = np.sum(\n flt * kernel, axis=(1, 2, 3))\n return output", "def _convolve_2d(kernel, image):\n\n nx = image.shape[0]\n ny = image.shape[1]\n nkx = kernel.shape[0]\n nky = kernel.shape[1]\n wkx = nkx // 2\n wky = nky // 2\n\n result = np.zeros(image.shape, dtype=float32)\n\n for i in prange(0, nx, 1):\n iimin = max(i - wkx, 0)\n iimax = min(i + wkx + 1, nx)\n for j in prange(0, ny, 1):\n jjmin = max(j - wky, 0)\n jjmax = min(j + wky + 1, ny)\n num = 0.0\n for ii in range(iimin, iimax, 1):\n iii = wkx + ii - i\n for jj in range(jjmin, jjmax, 1):\n jjj = wky + jj - j\n num += kernel[iii, jjj] * image[ii, jj]\n result[i, j] = num\n\n return result", "def convolve(kerns, kshp, 
nkern, images, imgshp, step=(1, 1), bias=None,\r\n mode='valid', flatten=True):\r\n N = numpy\r\n # start by computing output dimensions, size, etc\r\n kern_size = N.int64(N.prod(kshp))\r\n\r\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\r\n # in the first case, default nfeatures to 1\r\n if N.size(imgshp) == 2:\r\n imgshp = (1,) + imgshp\r\n\r\n # construct indices and index pointers for sparse matrix, which,\r\n # when multiplied with input images will generate a stack of image\r\n # patches\r\n indices, indptr, spmat_shape, sptype, outshp = \\\r\n convolution_indices.conv_eval(imgshp, kshp, step, mode)\r\n\r\n # build sparse matrix, then generate stack of image patches\r\n csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,\r\n indptr, spmat_shape)\r\n patches = (sparse.structured_dot(csc, images.T)).T\r\n\r\n # compute output of linear classifier\r\n pshape = tensor.stack(images.shape[0] * tensor.as_tensor(N.prod(outshp)),\\\r\n tensor.as_tensor(imgshp[0] * kern_size))\r\n patch_stack = tensor.reshape(patches, pshape, ndim=2)\r\n\r\n # kern is of shape: nkern x ksize*number_of_input_features\r\n # output is thus of shape: bsize*outshp x nkern\r\n output = tensor.dot(patch_stack, kerns.T)\r\n\r\n # add bias across each feature map (more efficient to do it now)\r\n if bias is not None:\r\n output += bias\r\n\r\n # now to have feature maps in raster order ...\r\n # go from bsize*outshp x nkern to bsize x nkern*outshp\r\n newshp = tensor.stack(images.shape[0],\\\r\n tensor.as_tensor(N.prod(outshp)),\\\r\n tensor.as_tensor(nkern))\r\n tensout = tensor.reshape(output, newshp, ndim=3)\r\n output = tensor.DimShuffle((False,) * tensout.ndim, (0, 2, 1))(tensout)\r\n if flatten:\r\n output = tensor.flatten(output, 2)\r\n\r\n return output, N.hstack((nkern, outshp))", "def PrimaryCap(inputs, dim_vector, n_channels, kernel_size, strides, padding):\n output = layers.Conv2D(filters=dim_vector*n_channels, kernel_size=kernel_size, strides=strides, padding=padding)(inputs)\n outputs = layers.Reshape(target_shape=[-1, dim_vector])(output)\n return layers.Lambda(squash)(outputs)", "def convolution2D(ndarray, kernel, kernel_pivot):\n\t#validation of arrays types\n\tassert ndarray.dtype == np.float, 'Invalid dtype of ndarray should be float'\n\tassert kernel.dtype == np.float, 'Invalid dtype of kernel should be float'\n\tassert ndarray.ndim == 2, 'Invalid ndarray dimension'\n\tassert kernel.ndim == 2, 'Invalid kernel dimension'\n\t#check if the kernel_pivot is valid\n\theight_kernel, width_kernel = kernel.shape\n\tx_pivot, y_pivot = kernel_pivot\n\n\tif not (x_pivot >= 0 and x_pivot < width_kernel) and (y_pivot >= 0 and y_pivot < height_kernel):\n\n\t\tassert False, 'Invalid pivot coordinates'\n\n\tflatten_kernel = kernel.flatten()\n\n\t#create new ndarray object for store the result\n\tresult_ndarray = np.zeros(ndarray.shape, dtype = float)\n\n\t#get the actual shape for \n\theight, width = ndarray.shape\n\t\n\t#calculate kernel bounds\n\tx_min, x_max = (x_pivot), ((width_kernel-1) - x_pivot)\n\ty_min, y_max = (y_pivot), ((height_kernel-1) - y_pivot)\n\n\t#change the bounds of my array with concatenate fuctions\n\tleft, right, up, down = x_min, x_max, y_min, y_max\n\tndarray = expand_img(ndarray, left, right, up, down)\n\n\tx_init, x_end = x_min, x_min + width\n\ty_init, y_end = y_min, y_min + height\n\t#loops for access to ndarrays\n\tfor x in range(x_init, x_end ):\n\t\tfor y in range(y_init, y_end ):\n\t\t\t#get the data of original image\n\t\t\tponderate_values = 
ndarray[(y - y_min):(y + y_max + 1) , # y range to indexed\n\t\t\t\t\t\t\t\t\t (x - x_min):(x + x_max) + 1] # x range to indexed\n\t\t\tponderate_values = ponderate_values.flatten()\n\n\t\t\t#get the dot product for the ponderate sum\n\t\t\tresult_ndarray[x - x_init, y - y_init] = np.dot(ponderate_values, flatten_kernel) \n\n\treturn result_ndarray", "def moffat_convolution_fft(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. *math.sqrt(2.**(1./beta)-1.))\n\n im_kernel_array = moffat_kernel(n_fwhm,beta,r_s)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def convolve(signal, filter):\r\n\r\n # Make the signal and filter the correct size\r\n padded_signal, padded_filter = preprocess(signal, filter) # Constant time\r\n fft_signal = fft(padded_signal) # Log(n) complexity\r\n fft_filter = fft(padded_filter) # Log(n) complexity\r\n filtered_signal = np.multiply(fft_signal, fft_filter) # Element wise multiply (p multiplies)\r\n time_signal = inverse_fft(filtered_signal) # O(N^2)\r\n # Remove excess zeros\r\n time_signal = postprocess(time_signal, signal.size, filter.size) # O(N)\r\n print(\"Done Filtering\")\r\n # return np.convolve(filter, signal) # Replace with your fft implementation\r\n return time_signal", "def image_conv(image, kernel):\n \n # Filter2D used for performance\n return cv2.filter2D(image, -1, kernel)", "def _corr1d_0(input, filter, output, wrap=True, cval=0.0):\n #3 loops: rows, cols, filter along rows\n rows, cols = input.shape\n N = len(filter)\n n = N//2\n #access scans whole col of output at once for better cache coherency\n for r in range(rows):\n for c in prange(cols):\n output[r,c] = 0\n for i in range(N):\n j = r-n+i\n if wrap:\n j %= rows\n if j >= 0 and j < rows:\n output[r,c] += input[j,c]*filter[i]\n else:\n output[r,c] += cval*filter[i]\n return output", "def _convs_unoptimized(args, filter_size, num_features, bias, bias_start=0.0, convtype='convolution'):\n\n # Calculate the total size of arguments on dimension 1\n\n total_arg_size_depth = 0\n shapes = [a.get_shape().as_list() for a in args]\n shape_length = len(shapes[0])\n for shape in shapes:\n if len(shape) not in [3, 4, 5]:\n raise ValueError(\"Conv Linear expects 3D, 4D or 5D arguments: %s\" % str(shapes))\n if len(shape) != len(shapes[0]):\n raise ValueError(\"Conv Linear expects all args to be of same Dimension: %s\" % str(shapes))\n else:\n total_arg_size_depth += shape[-1]\n dtype = [a.dtype for a in args][0]\n\n if shape_length != 4 and convtype == \"separable\":\n print ('[ERROR] separable convLSTM is only implemented for conv2D')\n raise NotImplementedError \n\n if len(args) != 2:\n print ('LSTM is only implemented with len(args) = 2!')\n raise NotImplementedError\n\n # Determine correct conv operation\n\n c_i = shapes[0][-1] # number of input channels per tensor in args\n c_o = num_features//4 # number of output channels per gate and cell state\n\n if convtype == 'separable': \n if shape_length == 3:\n conv_op = tf.nn.separable_conv1d # ? does not exist\n strides = 1\n elif shape_length == 4:\n conv_op = tf.nn.separable_conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = tf.nn.separable_conv3d # ? does not exist\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n channel_multiplier = 1\n elif convtype == 'depthwise': \n if shape_length == 3:\n conv_op = tf.nn.depthwise_conv1d # ? 
does not exist\n strides = 1\n elif shape_length == 4:\n conv_op = tf.nn.depthwise_conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = tf.nn.depthwise_conv3d # ? does not exist\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n channel_multiplier = 1\n else: # Normal CONV and spatially separable CONV\n if shape_length == 3:\n conv_op = nn_ops.conv1d\n strides = 1\n elif shape_length == 4:\n conv_op = nn_ops.conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = nn_ops.conv3d\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n\n # Now the computation\n\n if convtype == 'spatial':\n # Get kernels\n\n kernel_h = vs.get_variable(\"kernel_h\", [filter_size[0], 1, total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_h: ', [filter_size[0], 1, total_arg_size_depth, num_features])\n kernel_w = vs.get_variable(\"kernel_w\", [1, filter_size[1], total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_w: ', [1, filter_size[1], total_arg_size_depth, num_features])\n\n W_ix_h = kernel_h[..., 0:c_i, 0:1*c_o] # Name pattern: W(eights) for i(nput gate) for h(eight) CONV with x\n W_ih_h = kernel_h[..., c_i:2*c_i, 0:1*c_o]\n W_cx_h = kernel_h[..., 0:c_i, 1*c_o:2*c_o]\n W_ch_h = kernel_h[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx_h = kernel_h[..., 0:c_i, 2*c_o:3*c_o]\n W_fh_h = kernel_h[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox_h = kernel_h[..., 0:c_i, 3*c_o:4*c_o]\n W_oh_h = kernel_h[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n W_ix_w = kernel_w[..., 0:c_i, 0:1*c_o]\n W_ih_w = kernel_w[..., c_i:2*c_i, 0:1*c_o]\n W_cx_w = kernel_w[..., 0:c_i, 1*c_o:2*c_o]\n W_ch_w = kernel_w[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx_w = kernel_w[..., 0:c_i, 2*c_o:3*c_o]\n W_fh_w = kernel_w[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox_w = kernel_w[..., 0:c_i, 3*c_o:4*c_o]\n W_oh_w = kernel_w[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x_h = conv_op(args[0], W_ix_h, strides, padding=\"SAME\")\n i_x = conv_op(i_x_h, W_ix_w, strides, padding=\"SAME\")\n i_h_h = conv_op(args[1], W_ih_h, strides, padding=\"SAME\")\n i_h = conv_op(i_h_h, W_ih_w, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x_h = conv_op(args[0], W_cx_h, strides, padding=\"SAME\")\n c_x = conv_op(c_x_h, W_cx_w, strides, padding=\"SAME\")\n c_h_h = conv_op(args[1], W_ch_h, strides, padding=\"SAME\")\n c_h = conv_op(c_h_h, W_ch_w, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x_h = conv_op(args[0], W_fx_h, strides, padding=\"SAME\")\n f_x = conv_op(f_x_h, W_fx_w, strides, padding=\"SAME\")\n f_h_h = conv_op(args[1], W_fh_h, strides, padding=\"SAME\")\n f_h = conv_op(f_h_h, W_fh_w, strides, padding=\"SAME\")\n\n # output gate\n\n o_x_h = conv_op(args[0], W_ox_h, strides, padding=\"SAME\")\n o_x = conv_op(o_x_h, W_ox_w, strides, padding=\"SAME\")\n o_h_h = conv_op(args[1], W_oh_h, strides, padding=\"SAME\")\n o_h = conv_op(o_h_h, W_oh_w, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n elif convtype == 'depthwise':\n # Get kernels\n\n kernel_depth = vs.get_variable(\"kernel_depth\", filter_size + [total_arg_size_depth, 4*channel_multiplier],\n dtype=dtype)\n print('kernel_depth: ', filter_size + [total_arg_size_depth, 4*channel_multiplier])\n\n W_ix = kernel_depth[..., 0:c_i, 0:1*channel_multiplier]\n W_ih = kernel_depth[..., c_i:2*c_i, 0:1*channel_multiplier]\n 
W_cx = kernel_depth[..., 0:c_i, 1*channel_multiplier:2*channel_multiplier]\n W_ch = kernel_depth[..., c_i:2*c_i, 1*channel_multiplier:2*channel_multiplier]\n W_fx = kernel_depth[..., 0:c_i, 2*channel_multiplier:3*channel_multiplier]\n W_fh = kernel_depth[..., c_i:2*c_i, 2*channel_multiplier:3*channel_multiplier]\n W_ox = kernel_depth[..., 0:c_i, 3*channel_multiplier:4*channel_multiplier]\n W_oh = kernel_depth[..., c_i:2*c_i, 3*channel_multiplier:4*channel_multiplier]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n elif convtype == 'separable':\n # Get kernels\n\n kernel_depth = vs.get_variable(\"kernel_depth\", filter_size + [total_arg_size_depth, 4*channel_multiplier],\n dtype=dtype)\n print('kernel_depth: ', filter_size + [total_arg_size_depth, 4*channel_multiplier])\n kernel_sep = vs.get_variable(\"kernel_sep\", [1, 1, total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_sep: ', [1, 1, total_arg_size_depth, num_features])\n\n W_ix = kernel_depth[..., 0:c_i, 0:1*channel_multiplier]\n W_ih = kernel_depth[..., c_i:2*c_i, 0:1*channel_multiplier]\n W_cx = kernel_depth[..., 0:c_i, 1*channel_multiplier:2*channel_multiplier]\n W_ch = kernel_depth[..., c_i:2*c_i, 1*channel_multiplier:2*channel_multiplier]\n W_fx = kernel_depth[..., 0:c_i, 2*channel_multiplier:3*channel_multiplier]\n W_fh = kernel_depth[..., c_i:2*c_i, 2*channel_multiplier:3*channel_multiplier]\n W_ox = kernel_depth[..., 0:c_i, 3*channel_multiplier:4*channel_multiplier]\n W_oh = kernel_depth[..., c_i:2*c_i, 3*channel_multiplier:4*channel_multiplier]\n\n Wsep_ix = kernel_sep[..., 0:c_i, 0:1*c_o]\n Wsep_ih = kernel_sep[..., c_i:2*c_i, 0:1*c_o]\n Wsep_cx = kernel_sep[..., 0:c_i, 1*c_o:2*c_o]\n Wsep_ch = kernel_sep[..., c_i:2*c_i, 1*c_o:2*c_o]\n Wsep_fx = kernel_sep[..., 0:c_i, 2*c_o:3*c_o]\n Wsep_fh = kernel_sep[..., c_i:2*c_i, 2*c_o:3*c_o]\n Wsep_ox = kernel_sep[..., 0:c_i, 3*c_o:4*c_o]\n Wsep_oh = kernel_sep[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, Wsep_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, Wsep_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, Wsep_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, Wsep_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, Wsep_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, Wsep_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, Wsep_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, Wsep_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, 
res_h)\n\n else: # normal CONV\n # Get kernel\n\n kernel = vs.get_variable(\"kernel\", filter_size + [total_arg_size_depth, 4*c_o], dtype=dtype)\n print('kernel: ', filter_size + [total_arg_size_depth, 4*c_o])\n\n W_ix = kernel[..., 0:c_i, 0:1*c_o]\n W_ih = kernel[..., c_i:2*c_i, 0:1*c_o]\n W_cx = kernel[..., 0:c_i, 1*c_o:2*c_o]\n W_ch = kernel[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx = kernel[..., 0:c_i, 2*c_o:3*c_o]\n W_fh = kernel[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox = kernel[..., 0:c_i, 3*c_o:4*c_o]\n W_oh = kernel[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n \n if not bias:\n return res\n bias_term = vs.get_variable(\"biases\", [num_features], dtype=dtype,\n initializer=init_ops.constant_initializer(bias_start, dtype=dtype))\n return res + bias_term", "def _strict_conv1d(x, h):\n with ops.name_scope('strict_conv1d', values=[x, h]):\n x = array_ops.reshape(x, (1, -1, 1, 1))\n h = array_ops.reshape(h, (-1, 1, 1, 1))\n result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')\n return array_ops.reshape(result, [-1])", "def out_conv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n\n return (spatial + p2 - k)//s + 1", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding as used in the previous assignment can make\n # derivatives at the image boundary very big.\n \n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge') \n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.flipud(np.fliplr(kernel)) # flip h/v\n for h in range(Hi):\n for w in range(Wi):\n out[h, w] = np.sum(np.multiply(kernel, padded[h : h + Hk, w : w + Wk]))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def convolve_fft(array, kernel):\n\n array = np.asarray(array, dtype=np.cfloat)\n kernel = np.asarray(kernel, dtype=np.cfloat)\n\n if array.ndim != kernel.ndim:\n raise ValueError(\"Image and kernel must have same number of \"\n \"dimensions\")\n\n array_shape = array.shape\n kernel_shape = kernel.shape\n new_shape = np.array(array_shape) + np.array(kernel_shape)\n\n array_slices = []\n kernel_slices = []\n for (new_dimsize, array_dimsize, kernel_dimsize) in zip(\n new_shape, array_shape, kernel_shape):\n center = new_dimsize - (new_dimsize + 1) // 2\n array_slices += [slice(center - array_dimsize // 2,\n center + (array_dimsize + 1) // 2)]\n kernel_slices += [slice(center - kernel_dimsize // 2,\n center + 
(kernel_dimsize + 1) // 2)]\n\n array_slices = tuple(array_slices)\n kernel_slices = tuple(kernel_slices)\n\n if not np.all(new_shape == array_shape):\n big_array = np.zeros(new_shape, dtype=np.cfloat)\n big_array[array_slices] = array\n else:\n big_array = array\n\n if not np.all(new_shape == kernel_shape):\n big_kernel = np.zeros(new_shape, dtype=np.cfloat)\n big_kernel[kernel_slices] = kernel\n else:\n big_kernel = kernel\n\n array_fft = np.fft.fftn(big_array)\n kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))\n\n rifft = np.fft.ifftn(array_fft * kernel_fft)\n\n return rifft[array_slices].real", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the convolutional backward pass. #\n ###########################################################################\n #Extract variables from cache.\n x,w,b,conv_param = cache\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Extract shapes(lots of dimensions can become buggy)\n N,F,out_height,out_width = dout.shape\n #Save filter dimensions.\n HH,WW = w.shape[2],w.shape[3]\n #Start by computing gradient of the bias.(always the simplest one)\n db = np.sum(np.sum(np.sum(dout,axis = 3),axis = 2),axis = 0)\n dw = np.zeros_like(w)\n dx = np.zeros_like(x)\n #Start computing gradient of w and x.(Naive implementation)\n #Go over each filter in w.\n for i in range(F):\n #Go over each training example.\n for j in range(N):\n curr_x = x[j,:,:,:]\n #Get current gradient of activation map for j filter on i training example.\n curr_dout = dout[j,i,:,:]\n a = 0;b = 0\n #print(\"HERE\",curr_x.shape)\n #print(\"Stride:\",stride)\n for t in range(0,curr_x.shape[1] - WW + 1,stride):\n for k in range(0,curr_x.shape[2] - HH + 1,stride):\n #print(\"t: %d k: %d WW:%d HH:%d \" % (t,k,WW,HH))\n dw[i,:,:,:] += curr_dout[a,b] * curr_x[:,t:(t + WW),k:(k + HH)]\n dx[j,:,t:(t + WW),k:(k + HH)] += curr_dout[a,b] * w[i,:,:,:]\n if(b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n #Remove padding.\n dx = dx[:,:,pad : (dx.shape[2] - pad),pad: (dx.shape[3] - pad)] \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1), alpha=1.0, nl='RE'):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n filters = int(filters * alpha)\n x = KL.Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides)(inputs)\n x = KL.BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl=nl)", "def convolve(img, fltr, same=False, stri=1, pad=0, repfilter=False):\n # focus = np.array{eltype(img),2} # scope outside of if block\n if np.ndim(img) == 3:\n imgd, imgx, imgy = np.shape(img)\n elif np.ndim(img) == 2:\n imgx, imgy = np.shape(img)\n imgd = 1\n else:\n print(\"Wrong dimensions of image file. Quitting.\")\n return\n\n if np.ndim(fltr) == 3:\n fd, fx, fy = np.shape(fltr)\n elif np.ndim(fltr) == 2:\n fx, fy = np.shape(fltr)\n fd = 1\n else:\n print(\"Wrong dimensions of filter. Quitting.\")\n return\n\n if fd != imgd: # as a convenience we could just replicate the 2d filter...\n print(\"Depths of image and filter not equal. 
Quitting.\")\n return\n\n if same:\n pad = math.ceil((fx - 1) / 2)\n\n if pad > 0:\n img = dopad(img, pad)\n\n # dimensions of the result of convolution\n x_out = (imgx + 2 * pad - fx) // stri + 1\n y_out = (imgy + 2 * pad - fy) // stri + 1\n\n # print(imgx, imgy)\n\n ret = np.zeros((x_out, y_out))\n if imgd > 1: # slice through the depth, the zeroth (first) dimension\n for i in zip(range(x_out), range(0, imgx, stri)):\n for j in zip(range(y_out), range(0, imgy, stri)):\n ret[i[0], j[0]] = np.sum(img[:, i[1]:i[1] + fx, j[1]:j[1] +\n fy] * fltr)\n else:\n for i in zip(range(x_out), range(0, imgx, stri)):\n for j in zip(range(y_out), range(0, imgy, stri)):\n ret[i[0], j[0]] = np.sum(img[i[1]:i[1] + fx, j[1]:j[1] +\n fy] * fltr)\n return ret", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, image_h, image_w, image_c = images.shape\n kernel_h, kernel_w, kernel_c = kernel.shape\n stride_h, stride_w = stride\n\n if isinstance(padding, tuple):\n padding_h, padding_w = padding\n if padding is 'same':\n padding_h = int(((stride_h * image_h)\n - stride_h + kernel_h - image_h) / 2) + 1\n padding_w = int(((stride_w * image_w)\n - stride_w + kernel_w - image_w) / 2) + 1\n if padding is 'valid':\n padding_h, padding_w = 0, 0\n\n output_h = int(((image_h + (2 * padding_h) - kernel_h) / stride_h) + 1)\n output_w = int(((image_w + (2 * padding_w) - kernel_w) / stride_w) + 1)\n conv_output = np.zeros((m, output_h, output_w))\n\n img_m = np.arange(0, m)\n\n images = np.pad(\n images,\n [(0, 0), (padding_h, padding_h), (padding_w, padding_w), (0, 0)],\n mode='constant',\n constant_values=0)\n\n for i in range(output_h):\n for j in range(output_w):\n s_h = (stride_h)\n s_w = (stride_w)\n multiply = images[\n img_m,\n i*s_h:kernel_h+i*s_h,\n j*s_w:kernel_w+j*s_w]\n 
conv_output[img_m, i, j] = np.sum(\n np.multiply(multiply, kernel), axis=(1, 2, 3))\n return conv_output", "def convolution(image, kernel, scale=None, offset=0):\n kernel = np.array(kernel).flatten().tolist()\n if len(kernel)==9:\n size = (3,3)\n elif len(kernel)==25:\n size = (5,5)\n else:\n raise ValueError('Kernel size must be (3,3) or (5,5).')\n return image.filter(ImageFilter.Kernel(size, kernel, scale, offset))", "def test_deconv():\n\n # filter params\n R, S = 5, 5\n fshape = (R, S, 1)\n strides = 2\n filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)\n filter_val = np.zeros(fshape)\n filter_val[:, :, 0] = filter_val_nz\n\n deconv = Deconvolution(fshape,\n filter_init=ConstantInit(filter_val),\n strides=strides,\n padding=0,\n dilation=1)\n\n N = ng.make_axis(name='N', length=1) # batch\n image_shape = (1, 8, 8) # CHW\n image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)\n for nm, l in zip('CHW', image_shape)])\n image_axes |= N\n image = ng.placeholder(axes=image_axes)\n\n output = deconv(image)\n\n with closing(ngt.make_transformer()) as transformer:\n comp = transformer.add_computation(ng.computation(output, image))\n input_val = np.zeros(image_shape + (N.length, ), dtype=float)\n input_val[0, 0, 0] = 1\n input_val[0, 5, 5] = 1\n input_val[0, 7, 7] = 1\n result = comp(input_val)\n feature_map = np.squeeze(result)\n\n assert (feature_map[:5, :5] == filter_val_nz).all()\n\n result2 = filter_val_nz.copy()\n result2[-1, -1] = 26\n assert (feature_map[10:15, 10:15] == result2).all()\n\n result3 = filter_val_nz.copy()\n result3[0, 0] = 26\n assert (feature_map[-5:, -5:] == result3).all()", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def Get_Convolution(label, radius, feature_dict, pad = True, convert_length = 0.2204315, verbose = False, \n path = '', filename = '', meta = None):\n ## Make convolution at specified radius\n r = round(radius / convert_length)\n num_class = len(feature_dict)\n ## Create circular filter window\n x = np.arange(0, 2*r)\n y = np.arange(0, 2*r)\n mask = (x[np.newaxis,:]-r)**2 + (y[:,np.newaxis]-r)**2 < r**2 \n mask = mask[:,:,np.newaxis, np.newaxis]\n mask_tensor = tf.constant(mask, tf.float32)\n\n expanded_label = Expand_Mask(label, feature_dict)\n lab_shape = expanded_label.shape\n all_lab = np.zeros((lab_shape[0] - mask.shape[0] + 1, lab_shape[1] - mask.shape[1] + 1, num_class))\n for val in range(num_class): \n ohe_layer = expanded_label[:,:,val]\n ohe_tensor = tf.constant(ohe_layer[np.newaxis, :, :, np.newaxis], tf.float32)\n tensor_res = 
tf.nn.convolution(ohe_tensor, mask_tensor, padding='VALID') \n all_lab[:,:,val] = tensor_res.numpy()[0,:,:,0]\n if verbose:\n print('Finished: ' + str(val))\n \n if pad:\n array_shape = label.shape\n # up-down padding\n tot_pw_ud = (array_shape[0] - all_lab.shape[0])/2\n pw_up = int(np.ceil(tot_pw_ud))\n pw_down = int(np.floor(tot_pw_ud))\n # left-right padding\n tot_pw_lr = (array_shape[1] - all_lab.shape[1])/2\n pw_left = int(np.ceil(tot_pw_lr))\n pw_right = int(np.floor(tot_pw_lr))\n all_lab_pad = np.pad(all_lab, pad_width = ((pw_down, pw_up), (pw_left, pw_right), (0,0)), \n mode = 'constant', constant_values = 255)\n \n if filename !='':\n try:\n if path == '':\n path = 'Predictions'\n os.makedirs(path)\n except OSError as error: \n print('') \n \n meta.update(count = num_class, nodata = 255, compress = 'deflate', predictor = 2)\n \n # Write raster label to file\n tif_lab_pad = np.moveaxis(all_lab_pad,-1,0)\n with rasterio.open(path + '/' + filename + '.tif', 'w', **meta) as src:\n src.write(tif_lab_pad) \n return all_lab_pad", "def pooling_layer(self, img):\n img_w, img_h, img_c = img.shape\n output_dim = int((img_w - self._kernel_size) / self._kernel_size) + 1\n output = np.zeros((output_dim, output_dim, img_c))\n\n for c in range(img_c):\n in_x = out_x = 0\n while in_x + self._kernel_size <= img_w:\n in_y = out_y = 0\n while in_y + self._kernel_size <= img_h:\n output[out_x, out_y, c] = np.max(img[in_x:in_x + self._kernel_size, in_y:in_y + self._kernel_size, c])\n in_y += self._kernel_size\n out_y += 1\n in_x += self._kernel_size\n out_x += 1\n return output", "def fast_convolve(wav_val, R, wav_extended, flux_extended, FWHM_lim):\n FWHM = wav_val/R\n \n index_mask = (wav_extended > (wav_val - FWHM_lim*FWHM)) & (wav_extended < (wav_val + FWHM_lim*FWHM))\n \n flux_2convolve = flux_extended[index_mask]\n IP = unitary_Gauss(wav_extended[index_mask], wav_val, FWHM)\n \n sum_val = np.sum(IP*flux_2convolve) \n unitary_val = np.sum(IP*np.ones_like(flux_2convolve)) # Effect of convolution onUnitary. For changing number of points\n \n return sum_val/unitary_val", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n ###########################################################################\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_prime = H - (HH - 1)\n W_prime = W - (WW - 1)\n out = np.zeros((N, F, H_prime, W_prime))\n \n for n in range(N):\n for f in range(F):\n for i in range(H_prime):\n for j in range(W_prime):\n out[n, f, i, j] = np.sum(x[n, :, i:i+HH, j:j+WW] * w[f])\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def conv_matrix(matrix, kernel):", "def _fp32_vnchwconv_process(axis_0_index, h_loop_idx, h_size):\n\n def _fp32_inner_vnchwconv(col_lp_idx, col_size):\n \"\"\"\n inner vnchwconv\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size + col_lp_idx * max_sub_w_size +\n h_loop_idx * max_sub_h_size * axis_2 +\n axis_0_index * axis_1 * axis_2)\n data_in_info = (h_size, col_size, axis_1, axis_2, in_offset)\n _data_move_in_mc_on_w(tik_inst, ub_input, data_in, data_in_info)\n\n # for this case, data_move will move in one more block\n with tik_inst.new_stmt_scope():\n h_size_temp = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(tik.all(axis_1 > data_size_one_block,\n h_size % data_size_one_block > 0)):\n h_size_temp.set_as(_ceil_div(h_size, data_size_one_block) *\n data_size_one_block)\n with tik_inst.else_scope():\n h_size_temp.set_as(h_size)\n # transpose by vnchwconv\n sub_hw_size = (h_size_temp, col_size)\n _transpose_by_2_vnchwconv(tik_inst, ub_input[ub_offset],\n ub_input, sub_hw_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size + col_lp_idx * max_sub_w_size) *\n axis_1 + h_loop_idx * max_sub_h_size +\n axis_0_index * axis_1 * axis_2)\n data_out_info = (h_size, col_size, axis_1, axis_2, out_offset)\n _data_move_out_mc_on_w(tik_inst, data_out, ub_input[ub_offset], data_out_info)\n\n with tik_inst.for_range(0, loop_cnt) as lp_idx:\n _fp32_inner_vnchwconv(lp_idx, max_sub_w_size)\n with tik_inst.if_scope(left_size > 0):\n _fp32_inner_vnchwconv(loop_cnt, left_size)", "def _conv(self, indim, outdim, ksize, stride, padding):\n\n return nn.Sequential(\n nn.BatchNorm2d(indim),\n nn.Conv2d(indim, outdim, ksize, stride, padding),\n self.activ(),\n )", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding will make derivatives at the image boundary very big,\n # whereas we want to ignore the edges at the boundary.\n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge')\n\n ### YOUR CODE HERE\n for i in range(Hi):\n for j in range(Wi):\n out[i,j] = np.sum(padded[i : i + Hk, j : j + Wk] * np.flip(kernel))\n ### END YOUR CODE\n\n return out", "def _convk(\n in_channels, out_channels, kernel_size=3, stride=1, groups=1, dilation=1, bias=False\n):\n padding = dilation * (kernel_size - 1) // 2\n return Conv1d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=bias,\n dilation=dilation,\n )", "def conv_block(input_tensor, kernel_size, filters, strides=(2, 2)):\n\n filters1, filters2, filters3 = filters\n\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n 
kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size, strides=strides, padding='same',\n use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n shortcut = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x", "def compute_output(self, input_images, filter_shape, image_shape, poolsize=(2, 2), \n Pstruct = None, b= None):\n\n assert image_shape[1] == filter_shape[1]\n # the bias is a 1D tensor -- one bias per output feature map\n # convolve input feature maps with filters\n\n\n batch_size = image_shape[0] \n fwidth = Pstruct[0]['U1'].shape[0]\n fheight = Pstruct[0]['U2'].shape[0]\n nbr_channels = image_shape[1]\n nbr_filters = Pstruct[0]['U3'].shape[0]\n initial_n_rows = image_shape[2]\n initial_n_cols = image_shape[3]\n \n # Final number of rows and columns \n final_n_rows = initial_n_rows - fwidth + 1\n final_n_cols = initial_n_cols - fheight + 1\n # The convolved input images\n input4D = theano.shared(np.zeros((batch_size, nbr_filters, \n final_n_rows, final_n_cols)))\n print 'batch size ', batch_size \n one_image_shape = (1, initial_n_rows, initial_n_cols)\n # assert one_image_shape == (1,28,28)\n for image_index in range(batch_size):\n for channel_index in range(nbr_channels):\n # Convolve image with index image_index in the batch\n input4D = self.convolve_one_image(input4D, \n input_images[image_index,channel_index,:,:].reshape((1, initial_n_rows, initial_n_cols)),\n one_image_shape,\n Pstruct, \n filter_shape, \n image_index,\n channel_index) \n # downsample each feature map individually, using maxpooling\n start = time.time()\n pooled_out = downsample.max_pool_2d(input=input4D,\n ds=poolsize, \n ignore_border=True)\n end = time.time()\n self.downsample_time = (end - start)*1000/ image_shape[0]\n \n \n # add the bias term. Since the bias is a vector (1D array), we first\n # reshape it to a tensor of shape (1,n_filters,1,1). 
Each bias will\n # thus be broadcasted across mini-batches and feature map\n # width & height\n self.output = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))", "def ddcconv1d(inputs: tf.Variable,\n weights: tf.Variable,\n offsets: tf.Variable,\n dilation_rate: int = 1,\n offset_mode='F',\n interpolate=True,\n name: str='ddcc1d'):\n with tf.variable_scope(name):\n batch_size, seq_length, channels = (int(v) for v in inputs.shape)\n filters, _, kernel_size = (int(v) for v in weights.shape)\n\n spec_shapes = {\n 'B': batch_size,\n 'S': seq_length,\n 'F': filters,\n 'C': channels,\n 'K': kernel_size\n }\n\n # Indices stuff\n with tf.variable_scope('KernelBaseIndices'):\n base_indices = np.arange(seq_length).repeat(kernel_size).reshape((-1, kernel_size))\n window_indices = tf.constant(base_indices, dtype=tf.float32, name='window_indices')\n receptive_field = tf.constant(np.linspace(-kernel_size + 1, 0, kernel_size) * dilation_rate,\n name='receptive_field',\n dtype=tf.float32)\n kernel_indices = window_indices + receptive_field\n\n with tf.variable_scope('BatchIndices'):\n # Create batch indices constant in BSFCK shape\n batch_indices_np = expand_transform(np.arange(batch_size, dtype=np.int32), 'B', 'BSFCK', spec_shapes, numpy=True)\n batch_indices = tf.constant(batch_indices_np, dtype=tf.int32, name='batch_indices')\n\n with tf.variable_scope('ChannelIndices'):\n # Create channel indices constant in BSFCK shape\n channel_indices_np = expand_transform(np.arange(channels, dtype=np.int32), 'C', 'BSFCK', spec_shapes, numpy=True)\n channel_indices = tf.constant(channel_indices_np, dtype=tf.int32, name='channel_indices')\n\n with tf.variable_scope('Sampling'):\n # SAMPLING IS EXTREMELY EXPENSIVE!!!!!\n coords = get_coords(kernel_indices, offsets, offset_mode=offset_mode, spec_shapes=spec_shapes)\n\n if interpolate:\n # Left and right indices, e.g. 
index of 3.65 would be 3 on the left and 4 on the right\n indices_left = tf.cast(tf.floor(coords), tf.int32)\n indices_right = tf.cast(tf.ceil(coords), tf.int32)\n\n # Calculate interpolation, for index 3.65 interpolation factor would be 0.65\n interpolation = coords - tf.cast(indices_left, tf.float32)\n\n # Sample both values (on the lef and right)\n # Sample input of shape BSC with BSFCK3 indices (produced by stack) -> BSFCK for each side (left and right)\n vals_left = tf.gather_nd(inputs, tf.stack((batch_indices, indices_left, channel_indices), axis=-1))\n vals_right = tf.gather_nd(inputs, tf.stack((batch_indices, indices_right, channel_indices), axis=-1))\n\n # Interpolated values\n samples = vals_left + (vals_right - vals_left) * interpolation\n else:\n batch_idx = tf.stack((batch_indices, tf.cast(tf.floor(coords), tf.int32), channel_indices), axis=-1)\n samples = tf.gather_nd(inputs, batch_idx)\n\n with tf.variable_scope('Convolution'):\n # Apply weights: BSFCK * FCK = BSFCK\n conv = samples * weights\n\n # Sum across kernel: BSFCK -> BSFC\n conv = tf.reduce_sum(conv, axis=-1)\n\n # Sum across channels: BSFC -> BSF\n conv = tf.reduce_sum(conv, axis=-1)\n\n return conv", "def forward(self, someInputs):\n\n if self.spaceConv is True:\n someInputs = self.SpaceConvMatrixTranspose(someInputs)\n if self.outputValues.shape == self.outputShape:\n pass\n else:\n self.outputValues = np.transpose(self.outputValues, (3, 1, 2, 0))\n else:\n someInputs = np.reshape(someInputs, (self.inputShape))\n\n assert someInputs.shape == self.inputShape\n\n # Adds Zero Padding\n if self.zeroPadding is 0: # no padding added\n self.inputs = someInputs\n\n elif self.zeroPadding > 0: # adds padding\n self.inputs = np.zeros((self.inputShape[0], self.inputShape[1], self.inputShape[2] + 2 * self.zeroPadding,\n self.inputShape[\n 3] + 2 * self.zeroPadding)) # creates a zeros vector with the shape of the padded inputs\n\n for n in range(self.inputShape[0]): # does the padding along the W dimension\n for cin in range(self.inputShape[1]):\n for h in range(self.inputShape[2]):\n self.inputs[n, cin, h, :] = np.lib.pad(self.someInputs[n, cin, h, :],\n (self.zeroPadding, self.zeroPadding),\n 'constant', constant_values=(0, 0))\n for n in range(self.inputShape[0]): # does the padding along the H dimmension\n for cin in range(self.inputShape[1]):\n for w in range(self.inputShape[3]):\n self.inputs[n, cin, :, w + self.zeroPadding] = np.lib.pad(self.someInputs[n, cin, :, w],\n (self.zeroPadding, self.zeroPadding),\n 'constant', constant_values=(0, 0))\n\n # Do the convolution\n print \"Performing convolution\"\n timeA = time.time()\n for n in range(self.inputShape[0]):\n for cout in range(self.numberOfFilters):\n for cin in range(self.inputShape[1]):\n nh = 0\n for h in np.arange(0, self.inputShape[2] - self.kernelSize[0] + 1, self.stride[0]):\n nw = 0\n for w in np.arange(0, self.inputShape[3] - self.kernelSize[1] + 1, self.stride[1]):\n activationMap = self.inputs[n, cin, h:h + self.kernelSize[0],\n w:w + self.kernelSize[1]] # Portion of the input feature map convolved\n kernel = self.weights[cout, :, :] # kernel used for the convolution\n self.outputValues[n, cout, nh, nw] = np.sum(activationMap * kernel) + self.bias[\n cout] # convolution\n nw += 1\n nh += 1\n\n timeB = time.time()\n\n if self.spaceConv is True:\n self.outputValues = np.transpose(self.outputValues, (3, 1, 2, 0))\n\n # print \"Convolution took \" + str(timeB - timeA) + \" seconds\"\n\n # Applies the activation function to the resultant matrix\n if 
self.activationFunction is 'relu':\n self.outcome = self.relu(self.outputValues)\n # Applies reLU function\n if self.__nextLayer is None:\n return self.outcome\n else:\n return self.__nextLayer.forward(self.outcome) # Applies eLU function\n\n elif self.activationFunction is 'elu':\n self.outcome = self.elu(self.outputValues, self.alpha)\n if self.__nextLayer is None:\n return self.outcome\n else:\n return self.__nextLayer.forward(self.outcome)\n\n elif self.activationFunction is 'sigmoid': # Applies sigmoid function\n\n self.outcome = self.sigmoid(self.outputValues)\n if self.__nextLayer is None:\n return self.outcome\n else:\n return self.__nextLayer.forward(self.outcome)", "def conv1D(inSignal: np.ndarray, kernel1: np.ndarray) -> np.ndarray:\r\n flip_kernel = kernel1[::-1]\r\n kernel_len = len(kernel1)\r\n signal_longer = inSignal\r\n\r\n # padding with zeros the inSignal arr:\r\n for i in range(kernel_len - 1):\r\n signal_longer = np.insert(signal_longer, 0, 0)\r\n signal_longer = np.append(signal_longer, 0)\r\n\r\n new_img = np.zeros(len(inSignal) + kernel_len - 1)\r\n for i in range(kernel_len + len(inSignal) - 1):\r\n new_img[i] = ((flip_kernel * signal_longer[i:kernel_len + i]).sum())\r\n\r\n return new_img", "def patchify2d(w_in, w_out, k, *, bias=True):\n return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)", "def conv_2D(img,kernel,stride=1):\n\n m,n = img.shape\n r,c = kernel.shape\n\n kernel = np.flip(kernel,axis=1)\n kernel = np.flip(kernel,axis=0)\n\n c_m, c_n = int(np.ceil((m-r+1)/stride)), int(np.ceil((n-c+1)/stride))\n img_conv = np.zeros((c_m,c_n),dtype=float)\n\n for i,j in it.product(range(c_m),range(c_n)):\n img_conv[i,j] = (img[i*stride:i*stride+r,j*stride:j*stride+c] * kernel).sum()\n\n return img_conv", "def apply_mask(components):\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components", "def preprocess_conv(x_train, x_test, percentage, stride, grayscale=True,\n verbose=0):\n if grayscale:\n x_train = dct_set_gray(x_train, stride, verbose=verbose)\n x_test = dct_set_gray(x_test, stride, verbose=verbose)\n\n variances = np.var(x_train, axis=0)\n stdevs = np.sqrt(variances)\n idx = get_clip_index_conv(stdevs, percentage)\n for image in x_train:\n image[idx] = 0\n for image in x_test:\n image[idx] = 0\n else:\n x_train = dct_set_color(x_train, stride, verbose=verbose)\n x_test = dct_set_color(x_test, stride, verbose=verbose)\n if K.image_data_format() == 'channels_first':\n for i in range(3):\n variances = np.var(x_train[:, i, :, :], axis=0)\n stdevs = np.sqrt(variances)\n idx = get_clip_index_conv(stdevs, percentage)\n for image in x_train:\n channel = image[i]\n channel[idx] = 0\n for image in x_test:\n channel = image[i]\n channel[idx] = 0\n else:\n for i in range(3):\n variances = np.var(x_train[:, :, :, i], axis=0)\n stdevs = np.sqrt(variances)\n idx = get_clip_index_conv(stdevs, percentage)\n for image in x_train:\n channel = image[:, :, i]\n channel[idx] = 0\n for image in x_test:\n channel = image[:, :, i]\n channel[idx] = 0\n\n return x_train, x_test", "def convolutional(X, X_test, input_shape, n_filters, filter_size):\n\n\tfilters_shape = (n_filters, input_shape[1], filter_size[0], filter_size[1])\n\tfilters = theano.shared(\n\t\tnumpy.random.uniform(low=-0.1, high=0.1, size=filters_shape).astype(numpy.float32),\n\t\t'conv_filters'\n\t)\n\n\toutput_shape = (input_shape[0], 
n_filters, input_shape[2], input_shape[3])\n\n\toutput = conv2d(input=X, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')\n\toutput_test = conv2d(input=X_test, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')\n\n\tshift_x = (filter_size[0] - 1) // 2\n\tshift_y = (filter_size[1] - 1) // 2\n\n\toutput = output[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]\n\toutput_test = output_test[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]\n\n\treturn output, output_test, [filters], output_shape", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def convolution_2d(img, kernel):\n # TODO write convolution of arbritrary sized convolution here\n # Hint: you need the kernelsize\n\n offset = int(kernel.shape[0] / 2)\n irows, icols = img.shape\n newimg = np.zeros((irows - offset, icols - offset, offset * 2 + 1, offset * 2 + 1))\n nrows, ncols, _, _ = newimg.shape\n for x in range(nrows - 1):\n for y in range(ncols - 1):\n newimg[x, y, :, :] = img[x:x + offset * 2 + 1, y:y + offset * 2 + 1]\n newimg *= kernel\n\n newimg = np.sum(newimg, axis=3)\n newimg = np.sum(newimg, axis=2)\n\n return newimg", "def apply_filter(data, filter_bank, sfreq): \n if data.ndim == 1:\n filtered = np.zeros((1, filter_bank.shape[0], sfreq))\n for filt in range(filter_bank.shape[0]):\n filtered[0, filt, :] = np.convolve(filter_bank[filt,:], data)[int(sfreq-sfreq/2):int(sfreq+sfreq/2)]\n elif data.ndim == 2:\n filtered = np.zeros((data.shape[0], filter_bank.shape[0], sfreq))\n for chan in range(data.shape[0]):\n for filt in range(filter_bank.shape[0]):\n filtered[chan, filt, :] = np.convolve(filter_bank[filt, :], \\\n data[chan,:])[int(sfreq-sfreq/2):int(sfreq+sfreq/2)] # mode=\"full\"\n return filtered", "def conv(a, k_h, k_w, c_o, s_h, s_w, name, rate=1, biased=True, relu=True, padding='zeros', initializer=None):\n\n c_i = a.get_shape()[-1]\n convolve = lambda i, k: tf.nn.convolution(i, k, padding=padding, strides=[s_h, s_w], dilation_rate=[rate, rate])\n with tf.variable_scope(name) as scope:\n\n # init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)\n init_weights = tf.zeros_initializer() if initializer is 'zeros' else tf.contrib.layers.variance_scaling_initializer(factor=0.01, mode='FAN_AVG', uniform=False)\n init_biases = tf.constant_initializer(0.0)\n kernel = make_var('weights', [k_h, k_w, c_i, c_o], init_weights)\n if biased:\n biases = make_var('biases', [c_o], init_biases)\n conv = convolve(a, kernel)\n if relu:\n bias = tf.nn.bias_add(conv, biases)\n 
return tf.nn.relu(bias)\n return tf.nn.bias_add(conv, biases)\n else:\n conv = convolve(a, kernel)\n if relu:\n return tf.nn.relu(conv)\n return conv" ]
[ "0.85066", "0.7019092", "0.68060744", "0.6763622", "0.6690711", "0.6668837", "0.6661964", "0.6645885", "0.65938205", "0.6570192", "0.65078753", "0.6501851", "0.6485171", "0.6481288", "0.64750814", "0.64741033", "0.64626104", "0.64103127", "0.63968295", "0.63761157", "0.6356303", "0.6354948", "0.63338345", "0.63224655", "0.6294899", "0.629071", "0.6285808", "0.62841165", "0.6281297", "0.62706685", "0.6263088", "0.6252689", "0.6225844", "0.62165135", "0.6193664", "0.6172116", "0.61620075", "0.61620075", "0.6160968", "0.614653", "0.6120964", "0.61158854", "0.6103202", "0.6100953", "0.6095732", "0.6083362", "0.608174", "0.60800725", "0.6069254", "0.6061493", "0.6049462", "0.60454446", "0.6042316", "0.60390586", "0.6033442", "0.60274327", "0.6019072", "0.6015372", "0.6014147", "0.60138524", "0.60010105", "0.6000534", "0.59982806", "0.59967816", "0.59960604", "0.59884745", "0.5983076", "0.5979384", "0.5965211", "0.59555507", "0.59455043", "0.59364045", "0.592893", "0.59285426", "0.5914967", "0.59120995", "0.59095937", "0.5909151", "0.59072024", "0.5904228", "0.5903441", "0.5898998", "0.58965683", "0.5878687", "0.58698744", "0.5866448", "0.58610046", "0.5840621", "0.5838734", "0.583133", "0.58240014", "0.5818293", "0.58164203", "0.5811044", "0.58104146", "0.5809033", "0.58071", "0.5790685", "0.57798344", "0.5776948", "0.5774422" ]
0.0
-1
Interpolate wavelengths of spectra to a common WL. Most likely convert telluric to observed spectra wl after the wl mapping is performed
def match_wl(wl, spec, ref_wl, method="scipy", kind="linear"):
    starttime = time.time()
    if method == "scipy":
        # print(kind + " scipy interpolation")
        linear_interp = interp1d(wl, spec, kind=kind)
        new_spec = linear_interp(ref_wl)
    elif method == "numpy":
        if kind.lower() != "linear":
            print("Warning: Cannot do " + kind + " interpolation with numpy, switching to linear")
        # print("Linear numpy interpolation")
        new_spec = np.interp(ref_wl, wl, spec)  # 1-d piecewise linear interpolation
    else:
        print("Method was given as " + method)
        raise ValueError("Not correct interpolation method specified")
    # print("Interpolation Time = " + str(time.time() - starttime) + " seconds")
    return new_spec  # test interpolations
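# Usage sketch (illustrative only, kept as comments; the arrays and imports below are
# assumptions, not part of the source excerpt). The function itself expects `time`,
# `numpy as np`, and `scipy.interpolate.interp1d` to be imported by the enclosing module.
# import time
# import numpy as np
# from scipy.interpolate import interp1d
# wl = np.linspace(2100.0, 2160.0, 1000)            # telluric wavelength grid (nm), hypothetical
# spec = 1.0 - 0.5 * np.exp(-((wl - 2130.0) / 0.5) ** 2)  # toy telluric absorption line
# ref_wl = np.linspace(2105.0, 2155.0, 800)         # observed spectrum wavelength grid
# matched = match_wl(wl, spec, ref_wl, method="scipy", kind="linear")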
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PowerSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] = self.spectrum[:, 1] * (self.spectrum[:, 0] * 1e-9 / (constants.c * constants.h))\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. 
/ absorption_new.max()\n\n return wavelength_new, absorption_new", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PhotocurrentSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] *= constants.e\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def perform_spectral_interpolation(gaussian_data):\n\n dframe = pd.DataFrame()\n wavelength1 = gaussian_data[:, -1]\n\n sampled_wavelength1 = np.arange(min(wavelength1), max(wavelength1), 2)\n wavelength2 = gaussian_data[:, -1]\n sampled_wavelength2 = np.arange(min(wavelength2), max(wavelength2), 2)\n a1_val = gaussian_data[:, 0]\n a2_val = gaussian_data[:, 1]\n sigma1 = gaussian_data[:, 2]\n sigma2 = gaussian_data[:, 3]\n\n # A1 first\n fit_params_a1 = interp1d(wavelength1, a1_val, kind='linear')\n fitted_val_a1 = fit_params_a1(sampled_wavelength1)\n # Now A2\n fit_params_a2 = interp1d(wavelength2, a2_val, kind='linear')\n fitted_val_a2 = fit_params_a2(sampled_wavelength2)\n\n # Now Sigma1\n fit_params_sigma1 = interp1d(wavelength1, sigma1, kind='linear')\n fitted_val_sigma1 = fit_params_sigma1(sampled_wavelength1)\n\n # Now Sigma2\n fit_params_sigma2 = interp1d(wavelength2, sigma2, kind='slinear')\n fitted_val_sigma2 = fit_params_sigma2(sampled_wavelength2)\n\n\n# plt.plot(wavelength1, Sigma1, 'bo')\n# plt.plot(sampled_wavelength1, fitted_val_Sigma1, 'ro--', markersize=3)\n# plt.grid(True, linestyle=':')\n# plt.show()\n dframe = pd.DataFrame({'W1' : sampled_wavelength1,\n 'W2' : sampled_wavelength2,\n 'A1' : fitted_val_a1,\n 'A2' : fitted_val_a2,\n 'Sigma1' : fitted_val_sigma1,\n 'Sigma2' : fitted_val_sigma2,\n })\n\n return dframe.round(3)", "def norm_spectra(spectra, add_infinity=True):\n from scipy import interpolate\n start_n=np.array([3770.,3796.,3835.,3895.,3995.,4130.,4490.,4620.,5070.,5200.,\n 6000.,7000.,7550.,8400.])\n end_n=np.array([3795.,3830.,3885.,3960.,4075.,4290.,4570.,4670.,5100.,5300.,\n 6100.,7050.,7600.,8450.])\n n_range_s=np.array(['P','P','P','P','P','P','M','M','M','M','M','M','M','M'])\n if len(spectra[0])>2:\n snr = np.zeros([len(start_n),3])\n spectra[:,2][spectra[:,2]==0.] = spectra[:,2].max()\n else: \n snr = np.zeros([len(start_n),2])\n wav = spectra[:,0]\n for j in range(len(start_n)):\n if (start_n[j] < wav.max()) & (end_n[j] > wav.min()):\n _s = spectra[(wav>=start_n[j])&(wav<=end_n[j])]\n _w = _s[:,0]\n #Avoids gappy spectra\n k=3 # Check if there are more points than 3\n if len(_s)>k:\n #interpolate onto 10* resolution\n l = np.linspace(_w.min(),_w.max(),(len(_s)-1)*10+1)\n if len(spectra[0])>2:\n tck = interpolate.splrep(_w,_s[:,1],w=1/_s[:,2], s=1000)\n #median errors for max/mid point\n snr[j,2] = np.median(_s[:,2]) / np.sqrt(len(_w))\n else: tck = interpolate.splrep(_w,_s[:,1],s=0.0)\n f = interpolate.splev(l,tck)\n #find maxima and save\n if n_range_s[j]=='P': snr[j,0], snr[j,1] = l[f==f.max()][0], f.max()\n #find mean and save\n elif n_range_s[j]=='M': snr[j,0:2] = np.mean(l), np.mean(f)\n else: print('Unknown n_range_s, ignoring')\n snr = snr[ snr[:,0] != 0 ]\n #t parameter chosen by eye. 
Position of knots.\n if snr[:,0].max() < 6460: knots = [3000,4900,4100,4340,4860,int(snr[:,0].max()-5)]\n else: knots = [3885,4340,4900,6460]\n if snr[:,0].min() > 3885:\n print('Warning: knots used for spline norm unsuitable for high order fitting')\n knots=knots[1:]\n if (snr[:,0].min() > 4340) or (snr[:,0].max() < 4901): \n knots=None # 'Warning: knots used probably bad'\n if add_infinity: # Adds points at inf & 0 for spline to fit to err = mean(spec err)\n if snr.shape[1] > 2:\n mean_snr = np.mean(snr[:,2])\n snr = np.vstack([ snr, np.array([90000. ,0., mean_snr ]) ])\n snr = np.vstack([ snr, np.array([100000.,0., mean_snr ]) ])\n else:\n snr = np.vstack([ snr, np.array([90000.,0.]) ])\n snr = np.vstack([ snr, np.array([100000.,0.]) ])\n try: #weight by errors\n if len(spectra[0])>2: \n tck = interpolate.splrep(snr[:,0],snr[:,1], w=1/snr[:,2], t=knots, k=3)\n else: tck = interpolate.splrep(snr[:,0],snr[:,1], t=knots, k=3)\n except ValueError:\n knots=None\n if len(spectra[0])>2: \n tck = interpolate.splrep(snr[:,0],snr[:,1], w=1/snr[:,2], t=knots, k=3)\n else: tck = interpolate.splrep(snr[:,0],snr[:,1], t=knots, k=3)\n cont_flux = interpolate.splev(wav,tck).reshape(wav.size, 1)\n spectra_ret = np.copy(spectra)\n spectra_ret[:,1:] = spectra_ret[:,1:]/cont_flux\n return spectra_ret, cont_flux", "def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in 
range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def powerlaw(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def resample_spectrum(combined_spectrum, camera, showplot=False):\n\n # Unpack the wavelengths and fluxes.\n wls, fls = zip(*combined_spectrum)\n\n # Generate the re-sampled x-axis, starting at the min. wavelength and ending\n # at the max. wavelength. The final bin size should be 0.05 Ang. for SWP\n # cameras or 0.10 Ang. for LWP and LWR cameras. We oversample by a factor\n # of 10 before binning down.\n oversample = 10.\n if camera in [\"LWP\", \"LWR\"]:\n wl_step = 0.1 / oversample\n else:\n wl_step = 0.05 / oversample\n\n # Identify gaps in the data, interpolate those gaps separately so you don't\n # interpolate over a gap. 
A gap is defined as anywhere with more than three\n # missing points (based on the mean wavelength difference across the\n # spectrum).\n wl_diffs = numpy.diff(wls)\n # These are the *end points* of a given subsection.\n wl_gaps = numpy.where(numpy.digitize(wl_diffs, [3.*numpy.mean(wl_diffs)]) !=\n 0)[0]\n # If there are no gaps at all, then define the gap to be the last element.\n if wl_gaps.size == 0:\n wl_gaps = numpy.asarray([len(wls)-1])\n\n # Build the binned spectrum for each subspectrum (skipping over gaps).\n prev_index = 0\n binned_wls = []\n binned_fls = []\n # Only build up the interpolated spectrum if it is to be plotted.\n if showplot:\n interpolated_wls = []\n interpolated_fls = []\n\n for gap_ind in wl_gaps:\n # Get interpolated spectrum for this subsection.\n new_wls, new_fls = interpolate_subspec(wls, fls, prev_index, gap_ind,\n wl_step)\n # Push the interpolated values into the list via extension, but only if\n # it is to be plotted.\n if showplot:\n interpolated_wls.extend(new_wls)\n interpolated_fls.extend(new_fls)\n # Now bin the spectrum down by a factor of 10 in resolution to our\n # desired wavelength spacing.\n # First need to pad to an integer of 10 by adding NaNs.\n if len(new_wls) % 10 != 0:\n n_pad = 10 - (len(new_wls) % 10)\n new_wls.extend([numpy.nan]*n_pad)\n new_fls.extend([numpy.nan]*n_pad)\n binned_sub_wl = numpy.nanmean(numpy.asarray(new_wls).reshape(-1, 10),\n axis=1)\n binned_sub_fl = numpy.nanmean(numpy.asarray(new_fls).reshape(-1, 10),\n axis=1)\n binned_wls.extend(binned_sub_wl)\n binned_fls.extend(binned_sub_fl)\n # Update where the next sub_spectrum starts.\n prev_index = gap_ind+1\n\n # If the last gap did not cover to the end of the spectrum, do one more\n # subsection.\n if prev_index < len(wls):\n # Get interpolated spectrum for the final subsection.\n new_wls, new_fls = interpolate_subspec(wls, fls, prev_index, len(wls),\n wl_step)\n # Push the interpolated values into the list via extension, but only if\n # it is to be plotted.\n if showplot:\n interpolated_wls.extend(new_wls)\n interpolated_fls.extend(new_fls)\n # Now bin the spectrum down by a factor of 10 in resolution to our\n # desired wavelength spacing.\n # First need to pad to an integer of 10 by adding NaNs.\n if len(new_wls) % 10 != 0:\n n_pad = 10 - (len(new_wls) % 10)\n new_wls.extend([numpy.nan]*n_pad)\n new_fls.extend([numpy.nan]*n_pad)\n binned_sub_wl = numpy.nanmean(numpy.asarray(new_wls).reshape(-1, 10),\n axis=1)\n binned_sub_fl = numpy.nanmean(numpy.asarray(new_fls).reshape(-1, 10),\n axis=1)\n binned_wls.extend(binned_sub_wl)\n binned_fls.extend(binned_sub_fl)\n\n # Show the plotted spectra if requested.\n if showplot:\n import matplotlib.pyplot as pyp\n pyp.plot(wls, fls, '-ko')\n # Uncomment the lines below to overplot the (oversampled) interpolated\n # spectrum.\n if showplot:\n pyp.plot(interpolated_wls, interpolated_fls, '-ro')\n pyp.plot(binned_wls, binned_fls, '-go')\n for gapmark_ind in wl_gaps:\n pyp.axvline(wls[gapmark_ind])\n pyp.suptitle(\"Red = Oversampled, Green = Resampled, Black = Original\")\n pyp.show()\n return zip(binned_wls, binned_fls)", "def interpolate_subspec(wls, fls, prev_index, gap_ind, wl_step):\n # Get the subspectrum\n sub_spec_wls = wls[prev_index:gap_ind+1]\n sub_spec_fls = fls[prev_index:gap_ind+1]\n # Interpolate onto the new grid, using linear interpolation.\n interp_f = interp1d(sub_spec_wls, sub_spec_fls, kind=\"linear\")\n\n # Calculate the number of linear wavelength steps needed.\n min_wl = min(sub_spec_wls)\n max_wl = 
max(sub_spec_wls)\n n_steps = math.ceil((max_wl - min_wl) / wl_step)\n # Try a couple step sizes to get as close to the ideal size as possible.\n new_wls1, step_size1 = numpy.linspace(min_wl, max_wl, n_steps,\n retstep=True)\n new_wls2, step_size2 = numpy.linspace(min_wl, max_wl, n_steps+1,\n retstep=True)\n new_wls3, step_size3 = numpy.linspace(min_wl, max_wl, n_steps-1,\n retstep=True)\n # Choose the linear step size closest to our desired step size.\n diffs = [abs(x-wl_step) for x in [step_size1, step_size2, step_size3]]\n if diffs[0] <= diffs[1] and diffs[0] <= diffs[2]:\n new_wls = new_wls1\n elif diffs[1] <= diffs[2] and diffs[1] <= diffs[0]:\n new_wls = new_wls2\n else:\n new_wls = new_wls3\n # Calculate the interpolated values and extend the spectrum with them.\n return (list(new_wls), list(interp_f(new_wls)))", "def Schlafly16(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def interpolate_spectrum(interp,wave_ini,flux_ini,wave_fnl,flux_fnl):\n wave_ini_p = wave_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_ini_p = flux_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n wave_fnl_p = wave_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_fnl_p = flux_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n\n mask = np.zeros_like(wave_fnl).astype('int32')\n mask_p = mask.ctypes.data_as(ct.POINTER(ct.c_int))\n\n interp(wave_ini.shape[0],wave_fnl.shape[0],\n wave_ini_p,flux_ini_p,\n wave_fnl_p,flux_fnl_p,mask_p)\n\n return mask", "def get_surfaceflux_from_wavelength_and_laser_power(wavelength, rover_specs, laser_powers, receiver_areas,\n power_reqs, pointing_error=[1e-7, 1e-7]):\n assert len(power_reqs) == len(receiver_areas)\n assert len(power_reqs) == len(rover_specs)\n\n # Set the parameter space\n trans_radius = np.logspace(-3, 1, 1000)\n altitudes = np.logspace(4, 7, 1001)\n R, Z = np.meshgrid(trans_radius, altitudes, indexing=\"ij\")\n\n fig, ax = plt.subplots(len(power_reqs), len(laser_powers), sharey=True, sharex=True, figsize=(12, 7))\n for i, laser_power in enumerate(laser_powers):\n for j in range(len(power_reqs)):\n rover_spec = rover_specs[j]\n receiver_area = receiver_areas[j]\n power_req = power_reqs[j]\n\n # Get the beam radius\n beam_radius = R * np.sqrt(1.0 + (Z * wavelength / (np.pi * R ** 2)) ** 2)\n receiver_radius = np.sqrt(receiver_area / np.pi)\n radius_constraint_one = pointing_error[j] * Z + receiver_radius\n radius_constraint_two = pointing_error[j] * Z + beam_radius\n mask_one = beam_radius < radius_constraint_one\n mask_two = receiver_radius > radius_constraint_two\n final_mask = np.logical_and(mask_one, np.logical_not(mask_two))\n beam_radius[final_mask] = np.nan\n\n # Calculate the resulting surface flux\n receiver_power = laser_power/ (np.pi * 
beam_radius ** 2) * receiver_area\n receiver_power[np.pi * beam_radius ** 2 < receiver_area] = laser_power\n receiver_power[receiver_power < power_req] = np.nan\n\n # Normalise result by input power to get total efficiency\n receiver_power /= laser_power\n receiver_power[receiver_power < 0.001] = np.nan\n\n log_power = np.log10(receiver_power * 100)\n ax[j, i].contourf(np.log10(R), Z / 1e3, log_power, 100)\n m = cm.ScalarMappable()\n m.set_array(log_power)\n m.set_clim(-1.0, 2.0)\n fig.colorbar(m, ax=ax[j, i])\n ax[j, 0].set_ylabel('{} \\n Transmission distance [km]'.format(rover_spec))\n ax[0, i].set_title('Laser Power: {}kW'.format(laser_power / 1e3))\n ax[1, i].set_xlabel('Logarithm of Transmitter Radius [m]')\n plt.tight_layout()\n plt.show()\n\n return beam_radius, receiver_power", "def RiekeLebofsky85(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def wavelength_solution(file_name):\n file_data = read_file(file_name)\n header_data = file_data[0]\n image_data = file_data[1]\n\n range_begin = header_data['CRVAL3']\n pixel_begin = header_data['CRPIX3']\n step_size = header_data['CD3_3']\n steps = len(image_data)\n range_end = range_begin + steps * step_size\n return {'begin': range_begin, 'end': range_end, 'steps': steps}", "def match_wl(wl, spec, ref_wl, method=\"scipy\", kind=\"linear\", bounds_error=False):\n starttime = time.time()\n if method == 
\"scipy\":\n print(kind + \" scipy interpolation\")\n linear_interp = interp1d(wl, spec, kind=kind, bounds_error=False)\n new_spec = linear_interp(ref_wl)\n elif method == \"numpy\":\n if kind.lower() is not \"linear\":\n print(\"Warning: Cannot do \" + kind + \" interpolation with numpy, switching to linear\" )\n print(\"Linear numpy interpolation\")\n new_spec = np.interp(ref_wl, wl, spec) # 1-d peicewise linear interpolat\n else:\n print(\"Method was given as \" + method)\n raise(\"Not correct interpolation method specified\")\n print(\"Interpolation Time = \" + str(time.time() - starttime) + \" seconds\")\n\n return new_spec # test inperpolations ", "def scale_sky_spectrum(wlm, sky_spectrum, spectra, cut_sky=4., fmax=10, fmin=1, valid_wave_min=0, valid_wave_max=0, \n fibre_list=[100,200,300,400,500,600,700,800,900], plot=True, verbose=True, warnings=True): \n \n# # Read sky lines provided by 2dFdr\n# sky_line_,flux_sky_line_ = read_table(\"sky_lines_2dfdr.dat\", [\"f\", \"f\"] )\n# # Choose those lines in the range\n# sky_line=[]\n# flux_sky_line=[]\n# valid_wave_min = 6240\n# valid_wave_max = 7355\n# for i in range(len(sky_line_)):\n# if valid_wave_min < sky_line_[i] < valid_wave_max:\n# sky_line.append(sky_line_[i])\n# flux_sky_line.append(flux_sky_line_[i])\n \n \n if valid_wave_min == 0: valid_wave_min = wlm[0]\n if valid_wave_max == 0: valid_wave_max = wlm[-1]\n \n if verbose: print(\"\\n> Identifying sky lines using cut_sky =\",cut_sky,\", allowed SKY/OBJ values = [\",fmin,\",\",fmax,\"]\")\n if verbose: print(\" Using fibres = \",fibre_list)\n\n peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wlm,sky_spectrum, plot=plot, cut=cut_sky, fmax=fmax, only_id_lines=False, verbose=False) \n\n ratio_list=[]\n valid_peaks=[]\n \n if verbose: print(\"\\n Sky line Gaussian ratio Flux ratio\")\n n_sky_lines_found=0\n for i in range(len(peaks)):\n sky_spectrum_data=fluxes(wlm,sky_spectrum, peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n \n sky_median_continuum = np.nanmedian(sky_spectrum_data[11])\n \n object_spectrum_data_gauss=[]\n object_spectrum_data_integrated=[] \n median_list=[]\n for fibre in fibre_list: \n object_spectrum_flux=fluxes(wlm, spectra[fibre], peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n object_spectrum_data_gauss.append(object_spectrum_flux[3]) # Gaussian flux is 3\n object_spectrum_data_integrated.append(object_spectrum_flux[7]) # integrated flux is 7\n median_list.append(np.nanmedian(object_spectrum_flux[11]))\n object_spectrum_data=np.nanmedian(object_spectrum_data_gauss)\n object_spectrum_data_i=np.nanmedian(object_spectrum_data_integrated)\n \n object_median_continuum=np.nanmin(median_list) \n \n if fmin < object_spectrum_data/sky_spectrum_data[3] * sky_median_continuum/object_median_continuum < fmax :\n n_sky_lines_found = n_sky_lines_found + 1\n valid_peaks.append(peaks[i])\n ratio_list.append(object_spectrum_data/sky_spectrum_data[3])\n if verbose: print(\"{:3.0f} {:5.3f} {:2.3f} {:2.3f}\".format(n_sky_lines_found,peaks[i],object_spectrum_data/sky_spectrum_data[3], object_spectrum_data_i/sky_spectrum_data[7])) \n\n\n #print \"ratio_list =\", ratio_list\n #fit = np.polyfit(valid_peaks, ratio_list, 0) # This is the same that doing an average/mean\n #fit_line = fit[0]+0*wlm\n fit_line =np.nanmedian(ratio_list) # We just do a median\n #fit_line = fit[1]+fit[0]*wlm\n #fit_line = fit[2]+fit[1]*wlm+fit[0]*wlm**2\n #fit_line = fit[3]+fit[2]*wlm+fit[1]*wlm**2+fit[0]*wlm**3\n \n 
\n if plot:\n plt.plot(valid_peaks,ratio_list,\"+\")\n #plt.plot(wlm,fit_line)\n plt.axhline(y=fit_line, color='k', linestyle='--')\n plt.xlim(valid_wave_min-10, valid_wave_max+10) \n #if len(ratio_list) > 0:\n plt.ylim(np.nanmin(ratio_list)-0.2,np.nanmax(ratio_list)+0.2)\n plt.title(\"Scaling sky spectrum to object spectra\")\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"OBJECT / SKY\")\n plt.minorticks_on()\n plt.show()\n plt.close()\n \n if verbose: print(\" Using this fit to scale sky spectrum to object, the median value is \",np.round(fit_line,3),\"...\") \n \n sky_corrected = sky_spectrum * fit_line\n\n# plt.plot(wlm,sky_spectrum, \"r\", alpha=0.3)\n# plt.plot(wlm,sky_corrected, \"g\", alpha=0.3)\n# plt.show()\n# plt.close()\n \n return sky_corrected, np.round(fit_line,3)", "def perform_point_interpolation(sub_sample_wvl, sub_sample_rad, center_wv):\n # let us define spectral resolution\n\n print(center_wv)\n dframe = pd.DataFrame()\n\n sampled_wvl = np.arange(min(sub_sample_wvl), max(sub_sample_wvl), 2)\n fit_params = interp1d(sub_sample_wvl, sub_sample_rad, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n dframe['wavelength'] = sampled_wvl\n dframe['rad'] = fitted_val\n return dframe", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n # the first column should be the wavelength in nanometers, the second is the tilt power density/nm in\n # W/(m**2 nm) = J s^-1 m^-2 nm^-1 = C V m^-2 nm^-1\n spectras = {\"AM0Etr\": 1, \"AM1.5G\": 2, \"AM1.5D\": 3}\n self.spectrum = np.genfromtxt(path.join(path.dirname(__file__), './ASTMG173.csv'), delimiter=\",\",\n skip_header=2)[:, [0, spectras[spectra]]]\n self.start_w = start_w\n self.stop_w = stop_w\n # build custom spectrum if necessary\n if start_w != 280.0 or stop_w != 4000.0:\n self.spectrum = self.sub_spectrum(start_w, stop_w)\n\n # create the PowerSpectrum interpolator\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def do_hc_wavesol(p, loc):\n\n # ----------------------------------------------------------------------\n # Read UNe solution\n # ----------------------------------------------------------------------\n wave_u_ne, amp_u_ne = spirouImage.ReadLineList(p)\n loc['LL_LINE'], loc['AMPL_LINE'] = wave_u_ne, amp_u_ne\n source = __NAME__ + '.main() + spirouImage.ReadLineList()'\n loc.set_sources(['ll_line', 'ampl_line'], source)\n\n # ----------------------------------------------------------------------\n # Generate wave map from wave solution\n # ----------------------------------------------------------------------\n loc = generate_wave_map(p, loc)\n\n # ----------------------------------------------------------------------\n # Create new wavelength solution (method 0, old cal_HC_E2DS_EA)\n # ----------------------------------------------------------------------\n if p['WAVE_MODE_HC'] == 0:\n\n # ---------------------------------------------------------------------\n # Find Gaussian Peaks in HC spectrum\n # ---------------------------------------------------------------------\n loc = find_hc_gauss_peaks(p, loc)\n\n # ---------------------------------------------------------------------\n # Start plotting session\n # ---------------------------------------------------------------------\n if p['DRS_PLOT'] > 0:\n # start interactive plot\n sPlt.start_interactive_session(p)\n\n # ---------------------------------------------------------------------\n # Fit Gaussian peaks (in triplets) to\n # 
---------------------------------------------------------------------\n loc = fit_gaussian_triplets(p, loc)\n\n # ---------------------------------------------------------------------\n # Generate Resolution map and line profiles\n # ---------------------------------------------------------------------\n # log progress\n wmsg = 'Generating resolution map and calculating line spread function'\n WLOG(p, '', wmsg)\n # generate resolution map\n loc = generate_resolution_map(p, loc)\n # map line profile map\n if p['DRS_PLOT'] > 0:\n sPlt.wave_ea_plot_line_profiles(p, loc)\n\n # ---------------------------------------------------------------------\n # End plotting session\n # ---------------------------------------------------------------------\n # end interactive session\n if p['DRS_PLOT'] > 0:\n sPlt.end_interactive_session(p)\n\n # ----------------------------------------------------------------------\n # Set up all_lines storage\n # ----------------------------------------------------------------------\n\n # initialise up all_lines storage\n all_lines_1 = []\n\n # get parameters from p\n n_ord_start = p['WAVE_N_ORD_START']\n n_ord_final = p['WAVE_N_ORD_FINAL']\n\n # get values from loc:\n # line centers in pixels\n xgau = np.array(loc['XGAU_T'])\n # distance from catalogue in km/s - used for sanity checks\n dv = np.array(loc['DV_T'])\n # fitted polynomials per order\n fit_per_order = np.array(loc['POLY_WAVE_SOL'])\n # equivalent width of fitted gaussians to each line (in pixels)\n ew = np.array(loc['EW_T'])\n # amplitude of fitted gaussians to each line\n peak = np.array(loc['PEAK_T'])\n # catalogue line amplitude\n amp_catalog = np.array(loc['AMP_CATALOG'])\n # catalogue line wavelength\n wave_catalog = np.array(loc['WAVE_CATALOG'])\n # spectral order for each line\n ord_t = np.array(loc['ORD_T'])\n\n # loop through orders\n for iord in range(n_ord_start, n_ord_final):\n # keep relevant lines\n # -> right order\n # -> finite dv\n gg = (ord_t == iord) & (np.isfinite(dv))\n # put lines into ALL_LINES structure\n # reminder:\n # gparams[0] = output wavelengths\n # gparams[1] = output sigma(gauss fit width)\n # gparams[2] = output amplitude(gauss fit)\n # gparams[3] = difference in input / output wavelength\n # gparams[4] = input amplitudes\n # gparams[5] = output pixel positions\n # gparams[6] = output pixel sigma width (gauss fit width in pixels)\n # gparams[7] = output weights for the pixel position\n\n # dummy array for weights\n test = np.ones(np.shape(xgau[gg]), 'd') * 1e4\n # get the final wavelength value for each peak in the order\n output_wave_1 = np.polyval(fit_per_order[iord][::-1], xgau[gg])\n # convert the pixel equivalent width to wavelength units\n xgau_ew_ini = xgau[gg] - ew[gg] / 2\n xgau_ew_fin = xgau[gg] + ew[gg] / 2\n ew_ll_ini = np.polyval(fit_per_order[iord, :], xgau_ew_ini)\n ew_ll_fin = np.polyval(fit_per_order[iord, :], xgau_ew_fin)\n ew_ll = ew_ll_fin - ew_ll_ini\n # put all lines in the order into single array\n gau_params = np.column_stack((output_wave_1, ew_ll, peak[gg],\n wave_catalog[gg] - output_wave_1,\n amp_catalog[gg],\n xgau[gg], ew[gg], test))\n # append the array for the order into a list\n all_lines_1.append(gau_params)\n\n # add to loc\n loc['ALL_LINES_1'] = all_lines_1\n loc['LL_PARAM_1'] = np.array(fit_per_order)\n loc['LL_OUT_1'] = np.array(loc['WAVE_MAP2'])\n loc.set_sources(['ALL_LINES_1', 'LL_PARAM_1'], __NAME__ + '/main()')\n\n # For compatibility w/already defined functions, I need to save\n # here all_lines_2\n all_lines_2 = list(all_lines_1)\n 
loc['ALL_LINES_2'] = all_lines_2\n\n # ------------------------------------------------------------------\n # Littrow test\n # ------------------------------------------------------------------\n\n start = p['IC_LITTROW_ORDER_INIT_1']\n end = p['IC_LITTROW_ORDER_FINAL_1']\n\n # calculate echelle orders\n o_orders = np.arange(start, end)\n echelle_order = p['IC_HC_T_ORDER_START'] - o_orders\n loc['ECHELLE_ORDERS'] = echelle_order\n loc.set_source('ECHELLE_ORDERS', __NAME__ + '/main()')\n\n # Do Littrow check\n ckwargs = dict(ll=loc['LL_OUT_1'][start:end, :], iteration=1, log=True)\n loc = calculate_littrow_sol(p, loc, **ckwargs)\n\n # Plot wave solution littrow check\n if p['DRS_PLOT'] > 0:\n # plot littrow x pixels against fitted wavelength solution\n sPlt.wave_littrow_check_plot(p, loc, iteration=1)\n\n # ------------------------------------------------------------------\n # extrapolate Littrow solution\n # ------------------------------------------------------------------\n ekwargs = dict(ll=loc['LL_OUT_1'], iteration=1)\n loc = extrapolate_littrow_sol(p, loc, **ekwargs)\n\n # ------------------------------------------------------------------\n # Plot littrow solution\n # ------------------------------------------------------------------\n if p['DRS_PLOT'] > 0:\n # plot littrow x pixels against fitted wavelength solution\n sPlt.wave_littrow_extrap_plot(p, loc, iteration=1)\n\n return loc", "def reference_wl(infilename, outfilename, regfilename, frameid, calib_lst):\n data, head = fits.getdata(infilename, header=True)\n\n npoints = data['points'].max()\n\n newdescr = [descr for descr in data.dtype.descr]\n # add new columns\n newdescr.append(('order',np.int16))\n newdescr.append(('wavelength','>f8',(npoints,)))\n\n newspec = []\n\n # prepare for self reference. means one channel is ThAr\n file_identlist = []\n\n # find unique channels in the input spectra\n channel_lst = np.unique(data['channel'])\n\n # open region file and write headers\n regfile = open(regfilename, 'w')\n regfile.write('# Region file format: DS9 version 4.1'+os.linesep)\n regfile.write('global dashlist=8 3 width=1 font=\"helvetica 10 normal roman\" ')\n regfile.write('select=1 highlite=1 dash=0 fixed=1 edit=0 move=0 delete=0 include=1 source=1'+os.linesep)\n\n # find aperture locations\n aperture_coeffs = get_aperture_coeffs_in_header(head)\n\n # loop all channels\n for channel in sorted(channel_lst):\n\n # filter the spectra in current channel\n mask = (data['channel'] == channel)\n if mask.sum() == 0:\n continue\n spec = data[mask]\n\n # check if the current frameid & channel are in calib_lst\n if frameid in calib_lst and channel in calib_lst[frameid]:\n self_reference = True\n calib = calib_lst[frameid][channel]\n else:\n self_reference = False\n # find the closet ThAr\n refcalib_lst = []\n if frameid <= min(calib_lst):\n calib = calib_lst[min(calib_lst)][channel]\n refcalib_lst.append(calib)\n elif frameid >= max(calib_lst):\n calib = calib_lst[max(calib_lst)][channel]\n refcalib_lst.append(calib)\n else:\n for direction in [-1, +1]:\n _frameid = frameid\n while(True):\n _frameid += direction\n if _frameid in calib_lst and channel in calib_lst[_frameid]:\n calib = calib_lst[_frameid][channel]\n refcalib_lst.append(calib)\n #print(item.frameid, 'append',channel, frameid)\n break\n elif _frameid <= min(calib_lst) or _frameid >= max(calib_lst):\n break\n else:\n continue\n\n # get variable shortcuts.\n # in principle, these parameters in refcalib_lst should have the same\n # values. 
so just use the last calib solution\n k = calib['k']\n offset = calib['offset']\n xorder = calib['xorder']\n yorder = calib['yorder']\n\n if self_reference:\n coeff = calib['coeff']\n else:\n # calculate the average coefficients\n coeff_lst = np.array([_calib['coeff'] for _calib in refcalib_lst])\n coeff = coeff_lst.mean(axis=0, dtype=np.float64)\n\n # write important parameters into the FITS header\n leading_str = 'HIERARCH GAMSE WLCALIB CHANNEL %s'%channel\n head[leading_str+' K'] = k\n head[leading_str+' OFFSET'] = offset\n head[leading_str+' XORDER'] = xorder\n head[leading_str+' YORDER'] = yorder\n\n # write the coefficients\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n head[leading_str+' COEFF %d %d'%(j, i)] = coeff[j,i]\n\n # if the input spectra is a wavelength standard frame (e.g. ThAr), write\n # calibration solutions into FITS header\n if self_reference:\n head[leading_str+' MAXITER'] = calib['maxiter']\n head[leading_str+' STDDEV'] = calib['std']\n head[leading_str+' WINDOWSIZE'] = calib['window_size']\n head[leading_str+' NTOT'] = calib['ntot']\n head[leading_str+' NUSE'] = calib['nuse']\n head[leading_str+' NPIXEL'] = calib['npixel']\n\n # pack the identfied line list\n for aperture, list1 in calib['identlist'].items():\n for row in list1:\n file_identlist.append(row)\n\n for row in spec:\n aperture = row['aperture']\n npixel = len(row['wavelength'])\n order = aperture*k + offset\n wl = get_wavelength(coeff, npixel, np.arange(npixel), np.repeat(order, npixel))\n\n # add wavelength into FITS table\n item = list(row)\n item.append(order)\n item.append(wl)\n newspec.append(tuple(item))\n\n # write wavlength information into regfile\n if (channel, aperture) in aperture_coeffs:\n coeffs = aperture_coeffs[(channel, aperture)]\n position = poly.Chebyshev(coef=coeffs, domain=[0, npixel-1])\n color = {'A': 'red', 'B': 'green'}[channel]\n\n # write text in the left edge\n x = -6\n y = position(x)\n string = '# text(%7.2f, %7.2f) text={A%d, O%d} color=%s'\n text = string%(x+1, y+1, aperture, order, color)\n regfile.write(text+os.linesep)\n print('-------'+text)\n\n # write text in the right edge\n x = npixel-1+6\n y = position(x)\n string = '# text(%7.2f, %7.2f) text={A%d, O%d} color=%s'\n text = string%(x+1, y+1, aperture, order, color)\n regfile.write(text+os.linesep)\n\n # write text in the center\n x = npixel/2.\n y = position(x)\n string = '# text(%7.2f, %7.2f) text={Channel %s, Aperture %3d, Order %3d} color=%s'\n text = string%(x+1, y+1+5, channel, aperture, order, color)\n regfile.write(text+os.linesep)\n\n # draw lines\n x = np.linspace(0, npixel-1, 50)\n y = position(x)\n for (x1,x2), (y1, y2) in zip(pairwise(x), pairwise(y)):\n string = 'line(%7.2f,%7.2f,%7.2f,%7.2f) # color=%s'\n text = string%(x1+1, y1+1, x2+1, y2+1, color)\n regfile.write(text+os.linesep)\n\n # draw ticks at integer wavelengths\n pix = np.arange(npixel)\n if wl[0] > wl[-1]:\n wl = wl[::-1]\n pix = pix[::-1]\n f = intp.InterpolatedUnivariateSpline(wl, pix, k=3)\n w1 = wl.min()\n w2 = wl.max()\n for w in np.arange(int(math.ceil(w1)), int(math.floor(w2))+1):\n x = f(w)\n y = position(x)\n if w%10==0:\n ticklen = 3\n string = '# text(%7.2f, %7.2f) text={%4d} color=%s'\n text = string%(x+1+20, y+1+5, w, color)\n regfile.write(text+os.linesep)\n else:\n ticklen = 1\n string = 'line(%7.2f, %7.2f, %7.2f, %7.2f) # color=%s wl=%d'\n text = string%(x+1+20, y+1, x+1+20, y+1+ticklen, color, w)\n regfile.write(text+os.linesep)\n\n # draw identified lines in region file\n if self_reference and 
aperture in calib['identlist']:\n list1 = calib['identlist'][aperture]\n for row in list1:\n x = row['pixel']\n y = position(x)\n ps = ('x', 'circle')[row['mask']]\n string = 'point(%7.2f, %7.2f) # point=%s color=%s wl=%9.4f'\n text = string%(x+1, y+1, ps, color, row['wavelength'])\n regfile.write(text+os.linesep)\n\n newspec = np.array(newspec, dtype=newdescr)\n\n regfile.close()\n\n pri_hdu = fits.PrimaryHDU(header=head)\n tbl_hdu1 = fits.BinTableHDU(newspec)\n lst = [pri_hdu, tbl_hdu1]\n\n if len(file_identlist)>0:\n #file_identlist = np.array(file_identlist, dtype=identlinetype)\n file_identlist = np.array(file_identlist, dtype=list1.dtype)\n tbl_hdu2 = fits.BinTableHDU(file_identlist)\n lst.append(tbl_hdu2)\n hdu_lst = fits.HDUList(lst)\n\n if os.path.exists(outfilename):\n os.remove(outfilename)\n hdu_lst.writeto(outfilename)", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST 
redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def get_wl_band(radar_frequency):\n return 0 if (30 < radar_frequency < 40) else 1", "def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)", "def interpolate_variable(self, wavelengths, apertures):\n\n if self.n_ap == 1:\n return self.flux[0, :]\n\n sed_apertures = self.apertures.to(u.au).value\n sed_wav = self.wav.to(u.micron).value\n\n # If any apertures are larger than the defined max, reset to max\n apertures[apertures > sed_apertures.max()] = sed_apertures.max() * 0.999\n\n # If any apertures are smaller than the defined min, raise Exception\n if np.any(apertures < sed_apertures.min()):\n raise Exception(\"Aperture(s) requested too small\")\n\n # Find wavelength order\n order = np.argsort(wavelengths)\n\n # Interpolate apertures vs wavelength\n log10_ap_interp = interp1d(np.log10(wavelengths[order]), np.log10(apertures[order]), bounds_error=False, fill_value=np.nan)\n\n # Create interpolating function\n flux_interp = interp1d(sed_apertures, self.flux.swapaxes(0, 1))\n\n # Interpolate the apertures\n apertures = 10. ** log10_ap_interp(np.log10(sed_wav))\n\n # Extrapolate on either side\n apertures[np.log10(sed_wav) < log10_ap_interp.x[0]] = 10. ** log10_ap_interp.y[0]\n apertures[np.log10(sed_wav) > log10_ap_interp.x[-1]] = 10. ** log10_ap_interp.y[-1]\n\n # Interpolate and return only diagonal elements\n return flux_interp(apertures).diagonal()", "def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave", "def main():\n# pixel_to_wavelen_dir = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\\n# Spectral_Band_pass\\Pixel_to_wavelen_map'\n\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM_only_Gaussian'\n radiance_file = read_radiance_data()\n file_path_2 = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM\\spectral_bandpass_1400'\n\n #start with Gaussian Bandpass\n# data_names = [each for each in os.listdir(file_path)\n# if each.startswith(\"Params_Gauss\")]\n#\n#\n# sample_data = []\n# for data_files in data_names[9:]:\n# #print(data_files)\n#\n# wavelen_suffix = data_files.split('_')[-1]\n#\n# pixel_to_wvl_map_data = sorted([each for each in os.listdir(pixel_to_wavelen_dir)\n# if each.endswith(wavelen_suffix)])\n#\n# gaussian_files = os.path.join(file_path, data_files)\n#\n# dframe = pd.read_csv(gaussian_files)\n# #dframe = dframe[['A1', 'A2', 'Sigma1', 'Sigma2']]\n# dframe = dframe[['A1', 'Sigma1']] # for Gaussian only\n# pixel_to_wav_map = os.path.join(pixel_to_wavelen_dir, pixel_to_wvl_map_data[0])\n# dframe1 = pd.read_csv(pixel_to_wav_map)\n# dframe['CW'] = dframe1['CW']\n# dframe = dframe.iloc[1400]\n# sample_data.append(dframe.values)\n # for flat top Gaussian\n# #gaussian_values = perform_spectral_interpolation(np.array(sample_data))\n\n# gaussian_values = perform_spectral_interpolation_only_gaussian(np.array(sample_data))\n#\n##\n## # Let us now create a spectral bandpass\n# #create_spectral_bandpass(gaussian_values, radiance_file, file_path) # flat top Gaussian\n# create_spectral_bandpass_only_gaussian(gaussian_values, radiance_file, file_path)\n#\n#\n## #Make sure that the center wavelength of Gaussians are the same\n## sample_val = 
[]\n## data_names_interpol = sorted([each for each in os.listdir(file_path_2)\n## if each.endswith('csv')])\n## interpol_wavelen = []\n## interpol_rad = [ ]\n##\n## for i in range(0, 64):\n## sub_sample_wvl = []\n## sub_sample_rad = []\n##\n## for files in data_names_interpol[9:]:\n##\n## interpol_rsr = os.path.join(file_path_2, files)\n## dframe = pd.read_csv(interpol_rsr, usecols=[\"wavelength\", \"rad\"])\n##\n## wavelength = dframe['wavelength'][i]\n## rad = dframe['rad'][i]\n## sub_sample_wvl.append(wavelength)\n## sub_sample_rad.append(rad)\n## dframe = perform_point_interpolation(sub_sample_wvl, sub_sample_rad,\n #np.array(sample_data)[:,-1])\n## interpol_rad.append(dframe['rad'].values)\n## interpol_wavelen.append(dframe['wavelength'].values)\n## create_spectral_bandpass_interpol(np.array(interpol_wavelen),\n #np.array(interpol_rad),\n #np.array(sample_data)[:,-1], file_path_2)\n# cc\n##\n#\n##\n###\n## # let us now perfrom spectral convolution with high res. radiance data\n calculate_in_band_irradiance(file_path, file_path_2, radiance_file)", "def use_w(args):\n try:\n bounddata = Table.read(\n f'./Input/UseWv/WaveRegions_{args.WRegion}_{args.band}.csv',\n format='csv')\n except IOError:\n sys.exit(\n f'WaveRegions FILE \"./Input/UseWv/WaveRegions'\n '_{args.WRegion}_{args.band}.csv\" NOT FOUND!')\n\n wavesols = pd.read_csv(f'./Input/UseWv/WaveSolns_{args.band}.csv')\n#-------------------------------------------------------------------------------\n XRegion_dir = f'./Input/UseWv/XRegions_{args.WRegion}_{args.band}.csv'\n with open(XRegion_dir,'w') as filew:\n filew.write('order, start, end, masks\\n')\n\n m_order = np.array(bounddata['order'])\n starts = np.array(bounddata['start'])\n ends = np.array(bounddata['end'])\n ords = list( sorted(OrderDictCla().orderdict[args.band].keys()) )\n\n Ostarts = [OrderDictCla().orderdict[args.band][k][0] for k in ords]\n Oends = [OrderDictCla().orderdict[args.band][k][1] for k in ords]\n labels = []\n\n m_orders_unique = np.unique(m_order)\n\n # For each order specified, find what pixel numbers correspond to the\n # wavelength bounds presented.\n # If multiple wavelength bounds given for a single order, output a\n # pixel mask between the two, as well.\n for o in range(len(m_orders_unique)):\n\n # if len(m_orders_unique) == 9:\n # filew.write('9, 150, 1950, []\\n')\n # continue\n\n pixs = []\n mini = np.where(m_order == m_orders_unique[o])[0]\n for j in range(len(mini)):\n i = mini[j]\n\n wavebounds = [starts[i],ends[i]]\n wO = wavesols['w'+str(m_orders_unique[o])]\n pixO = wavesols['x'+str(m_orders_unique[o])]\n pix = [pixO[(np.argmin(abs(wO-wavebounds[k])))] for k in [0,1]]\n pixs = pixs + pix\n\n pixsS = list(sorted(pixs))\n q = pixsS[1:-1]\n if len(pixsS) == 2:\n filew.write('{}, {}, {},[]\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1])\n )\n else:\n filew.write('{}, {}, {},\"{}\"\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1],\n [[first,second] for first, second in zip(q[0::2], q[1::2])]\n ))", "def runWavelengthDependency():\n RunData([getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/')[0],], out='I600nmwave',\n wavelength='l600')\n RunData([getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/')[0],], out='I700nmwave',\n wavelength='l700')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[0],], out='I800nmwave',\n wavelength='l800')\n RunData([getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/')[4],], out='I890nmwave',\n 
wavelength='l890')", "def calculate_flux(self, band):\n\n if (self.wavelength[0] > band.wavelength[0] or\n self.wavelength[-1] < band.wavelength[-1]):\n\n warn('Spectrum does not cover the whole bandpass, '\n 'extrapolating...')\n dw = np.median(np.diff(self.wavelength.value))\n spec_wavelength = np.arange(\n band.wavelength.value[0],\n band.wavelength.value[-1] + dw, dw) * angstrom\n spec_flux = np.interp(spec_wavelength, self.wavelength,\n self.flux.value)\n\n else:\n spec_wavelength = self.wavelength\n spec_flux = self.flux.value\n\n i, j = spec_wavelength.searchsorted(\n Quantity([band.wavelength[0], band.wavelength[-1]]))\n wavelength = spec_wavelength[i:j]\n flux = spec_flux[i:j]\n\n dw_band = np.median(np.diff(band.wavelength))\n dw_spec = np.median(np.diff(wavelength))\n\n if dw_spec.value > dw_band.value > 20:\n\n warn('Spectrum wavelength sampling interval {0:.2f}, but bandpass'\n 'sampling interval {1:.2f}'.format(dw_spec, dw_band))\n\n # Interpolate the spectrum to the passband wavelengths:\n flux = np.interp(band.wavelength, wavelength, flux)\n band_transmission = band.transmission\n wavelength = band.wavelength\n\n else:\n # Interpolate the band transmission to the spectrum wavelengths:\n band_transmission = np.interp(\n wavelength, band.wavelength, band.transmission)\n\n # Weight by the response and wavelength, appropriate when we're\n # counting the number of photons within the band:\n flux = (np.trapz(band_transmission * flux * wavelength, wavelength) /\n np.trapz(band_transmission * wavelength, wavelength))\n flux *= erg / s / cm ** 2 / angstrom\n\n return flux", "def smooth_spectrum(wlm, s, wave_min=0, wave_max=0, step=50, exclude_wlm=[[0,0]], order=7, \n weight_fit_median=0.5, plot=False, verbose=False, fig_size=12):\n\n if verbose: print(\"\\n> Computing smooth spectrum...\")\n\n if wave_min == 0 : wave_min = wlm[0]\n if wave_max == 0 : wave_max = wlm[-1]\n \n running_wave = [] \n running_step_median = []\n cuts=np.int( (wave_max - wave_min) /step)\n \n exclude = 0 \n corte_index=-1\n for corte in range(cuts+1):\n next_wave= wave_min+step*corte\n if next_wave < wave_max:\n if next_wave > exclude_wlm[exclude][0] and next_wave < exclude_wlm[exclude][1]:\n if verbose: print(\" Skipping \",next_wave, \" as it is in the exclusion range [\",exclude_wlm[exclude][0],\",\",exclude_wlm[exclude][1],\"]\") \n\n else:\n corte_index=corte_index+1\n running_wave.append (next_wave)\n region = np.where((wlm > running_wave[corte_index]-step/2) & (wlm < running_wave[corte_index]+step/2)) \n running_step_median.append (np.nanmedian(s[region]) )\n if next_wave > exclude_wlm[exclude][1]:\n exclude = exclude + 1\n #if verbose and exclude_wlm[0] != [0,0] : print \"--- End exclusion range \",exclude \n if exclude == len(exclude_wlm) : exclude = len(exclude_wlm)-1 \n \n running_wave.append (wave_max)\n region = np.where((wlm > wave_max-step) & (wlm < wave_max+0.1))\n running_step_median.append (np.nanmedian(s[region]) )\n \n # Check not nan\n _running_wave_=[]\n _running_step_median_=[]\n for i in range(len(running_wave)):\n if np.isnan(running_step_median[i]):\n if verbose: print(\" There is a nan in \",running_wave[i])\n else:\n _running_wave_.append (running_wave[i])\n _running_step_median_.append (running_step_median[i])\n \n fit = np.polyfit(_running_wave_, _running_step_median_, order)\n pfit = np.poly1d(fit)\n fit_median = pfit(wlm)\n \n interpolated_continuum_smooth = interpolate.splrep(_running_wave_, _running_step_median_, s=0.02)\n fit_median_interpolated = interpolate.splev(wlm, 
interpolated_continuum_smooth, der=0)\n \n if plot: \n plt.figure(figsize=(fig_size, fig_size/2.5)) \n plt.plot(wlm,s, alpha=0.5)\n plt.plot(running_wave,running_step_median, \"+\", ms=15, mew=3)\n plt.plot(wlm, fit_median, label=\"fit median\")\n plt.plot(wlm, fit_median_interpolated, label=\"fit median_interp\")\n plt.plot(wlm, weight_fit_median*fit_median + (1-weight_fit_median)*fit_median_interpolated, label=\"weighted\")\n #extra_display = (np.nanmax(fit_median)-np.nanmin(fit_median)) / 10\n #plt.ylim(np.nanmin(fit_median)-extra_display, np.nanmax(fit_median)+extra_display)\n ymin = np.nanpercentile(s,1)\n ymax= np.nanpercentile(s,99)\n rango = (ymax-ymin)\n ymin = ymin - rango/10.\n ymax = ymax + rango/10. \n plt.ylim(ymin,ymax)\n plt.xlim(wlm[0]-10, wlm[-1]+10)\n plt.minorticks_on()\n plt.legend(frameon=False, loc=1, ncol=1)\n\n plt.axvline(x=wave_min, color='k', linestyle='--')\n plt.axvline(x=wave_max, color='k', linestyle='--')\n\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n \n if exclude_wlm[0][0] != 0:\n for i in range(len(exclude_wlm)):\n plt.axvspan(exclude_wlm[i][0], exclude_wlm[i][1], color='r', alpha=0.1) \n plt.show()\n plt.close()\n print(' Weights for getting smooth spectrum: fit_median =',weight_fit_median,' fit_median_interpolated =',(1-weight_fit_median))\n\n return weight_fit_median*fit_median + (1-weight_fit_median)*fit_median_interpolated # (fit_median+fit_median_interpolated)/2 # Decide if fit_median or fit_median_interpolated", "def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur", "def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def Fritz11(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error 
if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def Damineli16(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def Cardelli89(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def fp_wavelength_sol_new(p, loc):\n func_name = __NAME__ + '.fp_wavelength_sol_new()'\n # get parameters from p\n dopd0 = p['IC_FP_DOPD0']\n fit_deg = p['IC_FP_FIT_DEGREE']\n fp_large_jump = p['IC_FP_LARGE_JUMP']\n n_ord_start_fp = p['IC_FP_N_ORD_START']\n n_ord_final_fp = p['IC_FP_N_ORD_FINAL']\n cm_ind = p['IC_WAVE_FP_CM_IND']\n\n # find FP lines\n loc = find_fp_lines_new(p, loc)\n all_lines_2 = loc['ALL_LINES_2']\n # set up storage\n llpos_all, xxpos_all, ampl_all = [], [], []\n m_fp_all, weight_bl_all, order_rec_all, dopd_all = [], [], [], []\n ll_prev, m_prev = np.array([]), np.array([])\n # loop through the orders from red to blue\n for order_num in range(n_ord_final_fp, n_ord_start_fp - 1, -1):\n # define storage\n floc = dict()\n # select the lines in the order\n gg = loc['ORDPEAK'] == order_num\n # store the initial wavelengths of the lines\n # floc['llpos'] = np.polynomial.chebyshev.chebval(\n # loc['XPEAK'][gg],\n # loc['LITTROW_EXTRAP_PARAM_1'][order_num])\n floc['llpos'] = np.polyval(\n 
loc['LITTROW_EXTRAP_PARAM_1'][order_num][::-1],\n loc['XPEAK'][gg])\n # store the pixel positions of the lines\n floc['xxpos'] = loc['XPEAK'][gg]\n # get the median pixel difference between successive lines\n # (to check for gaps)\n xxpos_diff_med = np.nanmedian(floc['xxpos'][1:] - floc['xxpos'][:-1])\n # store the amplitudes of the lines\n floc['ampl'] = loc['AMPPEAK'][gg]\n # store the values of the blaze at the pixel positions of the lines\n floc['weight_bl'] = np.zeros_like(floc['llpos'])\n # get and normalize blaze for the order\n nblaze = loc['BLAZE'][order_num] / np.nanmax(loc['BLAZE'][order_num])\n for it in range(1, len(floc['llpos'])):\n floc['weight_bl'][it] = nblaze[int(np.round(floc['xxpos'][it]))]\n # store the order numbers\n floc['order_rec'] = loc['ORDPEAK'][gg]\n # set up storage for line numbers\n mpeak = np.zeros_like(floc['llpos'])\n # line number for the last (reddest) line of the order (by FP equation)\n mpeak[-1] = int(dopd0 / floc['llpos'][-1])\n # calculate successive line numbers\n for it in range(len(floc['llpos']) - 2, -1, -1):\n # check for gap in x positions\n flocdiff = floc['xxpos'][it + 1] - floc['xxpos'][it]\n lowcond = xxpos_diff_med - (0.25 * xxpos_diff_med)\n highcond = xxpos_diff_med + (0.25 * xxpos_diff_med)\n if lowcond < flocdiff < highcond:\n # no gap: add 1 to line number of previous line\n mpeak[it] = mpeak[it + 1] + 1\n # if there is a gap, fix it\n else:\n # get line x positions\n flocx0 = floc['xxpos'][it]\n flocx1 = floc['xxpos'][it + 1]\n # get line wavelengths\n floc0 = floc['llpos'][it]\n floc1 = floc['llpos'][it + 1]\n # estimate the number of peaks missed\n m_offset = int(np.round((flocx1 - flocx0) / xxpos_diff_med))\n # add to m of previous peak\n mpeak[it] = mpeak[it + 1] + m_offset\n # verify there's no dopd jump, fix if present\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n # do loops to check jumps\n if dopd_1 - dopd_2 > fp_large_jump:\n while (dopd_1 - dopd_2) > fp_large_jump:\n mpeak[it] = mpeak[it] - 1\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n elif dopd_1 - dopd_2 < -fp_large_jump:\n while (dopd_1 - dopd_2) < -fp_large_jump:\n mpeak[it] = mpeak[it] + 1\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n # determination of observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n # for orders other than the reddest, attempt to cross-match\n if order_num != n_ord_final_fp:\n # check for overlap\n if floc['llpos'][cm_ind] > ll_prev[0]:\n # find closest peak in overlap and get its m value\n ind = np.abs(ll_prev - floc['llpos'][cm_ind]).argmin()\n # the peak matching the reddest may not always be found!!\n # define maximum permitted difference\n llpos_diff_med = np.nanmedian(\n floc['llpos'][1:] - floc['llpos'][:-1])\n # print(llpos_diff_med)\n # print(abs(ll_prev[ind] - floc['llpos'][-1]))\n # check if the difference is over the limit\n if abs(ll_prev[ind] - floc['llpos'][-1]) > 1.5 * llpos_diff_med:\n # print('overlap line not matched')\n ll_diff = ll_prev[ind] - floc['llpos'][-1]\n ind2 = -2\n # loop over next reddest peak until they match\n while ll_diff > 1.5 * llpos_diff_med:\n # check there is still overlap\n if floc['llpos'][ind2] > ll_prev[0]:\n ind = np.abs(ll_prev - floc['llpos'][ind2]).argmin()\n ll_diff = ll_prev[ind] - floc['llpos'][ind2]\n ind2 -= 1\n else:\n break\n m_match = 
m_prev[ind]\n # save previous mpeak calculated\n m_init = mpeak[cm_ind]\n # recalculate m if there's an offset from cross_match\n m_offset_c = m_match - m_init\n if m_offset_c != 0:\n mpeak = mpeak + m_offset_c\n # print note for dev if different\n if p['DRS_DEBUG']:\n wargs = [order_num, m_match - m_init]\n wmsg = 'M difference for order {0}: {1}'\n WLOG(p, '', wmsg.format(*wargs))\n # recalculate observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store new m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n else:\n wmsg = 'No overlap for order {0}'\n WLOG(p, 'warning', wmsg.format(order_num))\n # save previous mpeak calculated\n m_init = mpeak[cm_ind]\n m_test = mpeak[cm_ind]\n # get dopd for last line of current & first of last order\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0) * 1.e-3\n dopd_prev = (m_prev[0] * ll_prev[0] - dopd0) * 1.e-3\n # do loops to check jumps\n if dopd_curr - dopd_prev > fp_large_jump:\n while (dopd_curr - dopd_prev) > fp_large_jump:\n m_test = m_test - 1\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0)\n dopd_curr = dopd_curr * 1.e-3\n elif dopd_curr - dopd_prev < -fp_large_jump:\n while (dopd_curr - dopd_prev) < -fp_large_jump:\n m_test = m_test + 1\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0)\n dopd_curr = dopd_curr * 1.e-3\n # recalculate m if there's an offset from cross_match\n m_offset_c = m_test - m_init\n if m_offset_c != 0:\n mpeak = mpeak + m_offset_c\n # print note for dev if different\n if p['DRS_DEBUG']:\n wargs = [order_num, mpeak[cm_ind] - m_init]\n wmsg = 'M difference for order {0}: {1}'\n WLOG(p, '', wmsg.format(*wargs))\n # recalculate observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store new m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n\n # add to storage\n llpos_all += list(floc['llpos'])\n xxpos_all += list(floc['xxpos'])\n ampl_all += list(floc['ampl'])\n m_fp_all += list(floc['m_fp'])\n weight_bl_all += list(floc['weight_bl'])\n order_rec_all += list(floc['order_rec'])\n # difference in cavity width converted to microns\n dopd_all += list((floc['dopd_t'] - dopd0) * 1.e-3)\n # save numpy arrays of current order to be previous in next loop\n ll_prev = np.array(floc['llpos'])\n m_prev = np.array(floc['m_fp'])\n\n # convert to numpy arrays\n llpos_all = np.array(llpos_all)\n xxpos_all = np.array(xxpos_all)\n ampl_all = np.array(ampl_all)\n m_fp_all = np.array(m_fp_all)\n weight_bl_all = np.array(weight_bl_all)\n order_rec_all = np.array(order_rec_all)\n dopd_all = np.array(dopd_all)\n\n # fit a polynomial to line number v measured difference in cavity\n # width, weighted by blaze\n with warnings.catch_warnings(record=True) as w:\n coeffs = nanpolyfit(m_fp_all, dopd_all, fit_deg, w=weight_bl_all)[::-1]\n spirouCore.WarnLog(p, w, funcname=func_name)\n # get the values of the fitted cavity width difference\n cfit = np.polyval(coeffs[::-1], m_fp_all)\n # update line wavelengths using the new cavity width fit\n newll = (dopd0 + cfit * 1000.) 
/ m_fp_all\n # insert fp lines into all_lines2 (at the correct positions)\n all_lines_2 = insert_fp_lines(p, newll, llpos_all, all_lines_2,\n order_rec_all, xxpos_all, ampl_all)\n\n # add to loc\n loc['FP_LL_POS'] = llpos_all\n loc['FP_XX_POS'] = xxpos_all\n loc['FP_M'] = m_fp_all\n loc['FP_DOPD_OFFSET'] = dopd_all\n loc['FP_AMPL'] = ampl_all\n loc['FP_LL_POS_NEW'] = newll\n loc['ALL_LINES_2'] = all_lines_2\n loc['FP_DOPD_OFFSET_COEFF'] = coeffs\n loc['FP_DOPD_OFFSET_FIT'] = cfit\n loc['FP_ORD_REC'] = order_rec_all\n # set sources\n sources = ['FP_LL_POS', 'FP_XX_POS', 'FP_M', 'FP_DOPD_OFFSET',\n 'FP_AMPL', 'FP_LL_POS_NEW', 'ALL_LINES_2',\n 'FP_DOPD_OFFSET_COEFF', 'FP_DOPD_OFFSET_FIT', 'FP_ORD_REC']\n loc.set_sources(sources, func_name)\n\n return loc", "def _reduce_resolution(wi, fi, fwhm0=0.55, sigma_floor=0.2):\n\n # all in AA\n w_lick_res = (4000., 4400., 4900., 5400., 6000.)\n lick_res = (11.5, 9.2, 8.4, 8.4, 9.8) # FWHM in AA\n\n w = np.asarray(wi)\n flux = np.atleast_2d(fi)\n\n # Linear interpolation of lick_res over w\n # numpy interp does constant instead of extrapolation\n # res = np.interp(w, w_lick_res, lick_res)\n\n # spline order: 1 linear, 2 quadratic, 3 cubic ...\n from scipy.interpolate import InterpolatedUnivariateSpline\n res = InterpolatedUnivariateSpline(w_lick_res, lick_res, k=1)(w)\n\n # Compute width from fwhm\n const = 2. * np.sqrt(2. * np.log(2)) # conversion fwhm --> sigma\n lick_sigma = np.sqrt((res ** 2 - fwhm0 ** 2)) / const\n\n # Convolution by g=1/sqrt(2*pi*sigma^2) * exp(-r^2/(2*sigma^2))\n flux_red = np.zeros(flux.shape, dtype=flux.dtype)\n\n for i, sigma in enumerate(lick_sigma):\n maxsigma = 3. * sigma\n # sampling floor: min (0.2, sigma * 0.1)\n delta = min(sigma_floor, sigma * 0.1)\n delta_wj = np.arange(-maxsigma, + maxsigma, delta)\n wj = delta_wj + w[i]\n for k, fk in enumerate(flux):\n fluxj = np.interp(wj, w, fk, left=0., right=0.)\n flux_red[k, i] = np.sum(fluxj * delta *\n np.exp(-0.5 * (delta_wj / sigma) ** 2))\n\n flux_red /= lick_sigma * const\n\n return flux_red.reshape(np.shape(fi))", "def Hosek18(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def get_spectra(self, map1, map2=None, nl=None, pseudo=False, analytic_errors=False):\n\t\tmap1 = np.asarray(map1)\n\t\tif map2 is None: # Auto\n\t\t\tpcl = hp.anafast(map1 * self.mask, lmax=self.lmax)\n\t\telse: # Cross\n\t\t\tmap2 = np.asarray(map2)\n\t\t\tpcl = hp.anafast(map1 * self.mask, map2=map2 * self.mask, lmax=self.lmax)\n\t\t\n\t\tif analytic_errors: \n\t\t\tpcl_tot = pcl\n\n\t\tif nl is not None:\n\t\t\tif nl.size - 1 < self.lmax:\n\t\t\t\traise ValueError('The noise power spectrum does not have enough l.')\n\n\t\tif self.MASTER:\n\t\t\tif nl is None: \n\t\t\t\tcl = 
np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl))\n\t\t\telse:\n\t\t\t\tif pseudo:\n\t\t\t\t\tcl = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl - nl[:self.lmax+1]))\n\t\t\t\telse:\n\t\t\t\t\tcl = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl)) - self.bin_spectra(nl[:self.lmax+1])\t\t\t\n\t\t\tif analytic_errors and map2 is None:\n\t\t\t\tcl_tot = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl_tot))\n\t\telse: # f_sky approx\n\t\t\tif nl is None:\n\t\t\t\tcl = np.dot(self.P_bl, pcl/self.weight)/self.fsky\n\t\t\telse:\n\t\t\t\tif pseudo:\n\t\t\t\t\tcl = self.bin_spectra(pcl/self.weight - nl[:self.lmax+1]) / self.fsky\n\t\t\t\telse:\n\t\t\t\t\tcl = self.bin_spectra(pcl/self.weight) / self.fsky - self.bin_spectra(nl[:self.lmax+1])\n\t\t\tif analytic_errors and map2 is None:\n\t\t\t\tcl_tot = self.bin_spectra(pcl_tot/self.weight) / self.fsky\n\n\t\t# Analytic error bars estimation \n\t\t# TODO: moving this into another method?\n\t\tif analytic_errors:\n\t\t\tif map2 is None: # Auto\n\t\t\t\tcl_err = np.sqrt(2./((2. * self.lb + 1) * self.delta_ell * self.fsky)) * cl_tot\n\t\t\telse: # Cross\n\t\t\t\t# Extracting TOTAL pseudo-power spectra\n\t\t\t\tpcl_1 = hp.anafast(map1 * self.mask, lmax=self.lmax)\n\t\t\t\tpcl_2 = hp.anafast(map2 * self.mask, lmax=self.lmax)\n\t\t\t\t\n\t\t\t\tif self.fwhm_smooth is not None:\n\t\t\t\t\tB2_1_ll = np.diag(self.B_1_l**2)\n\t\t\t\t\tB2_2_ll = np.diag(self.B_2_l**2)\n\n\t\t\t\tif self.MASTER:\n\t\t\t\t\tK_ll_1 = self.M_ll\n\t\t\t\t\tK_ll_2 = self.M_ll\n\t\t\t\t\t\n\t\t\t\t\tif self.pixwin:\n\t\t\t\t\t\tK_ll_1 = np.dot(K_ll_1, self.pw2_ll)\n\t\t\t\t\t\tK_ll_2 = np.dot(K_ll_2, self.pw2_ll)\n\t\t\t\t\tif self.fwhm_smooth is not None:\n\t\t\t\t\t\tK_ll_1 = np.dot(K_ll_1, B2_1_ll)\n\t\t\t\t\t\tK_ll_2 = np.dot(K_ll_2, B2_2_ll)\n\n\t\t\t\t\tK_bb_1 = np.dot(np.dot(self.P_bl, K_ll_1), self.Q_lb)\n\t\t\t\t\tK_bb_2 = np.dot(np.dot(self.P_bl, K_ll_2), self.Q_lb)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tK_bb_inv_1 = self.inv_routine(K_bb_1)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"\\t! Problem with Coupling Matrix inversion: let me try a little trick ! \")\n\t\t\t\t\t\tK_bb_inv_1 = self.inv_routine(K_bb_1 + np.eye(K_bb_1.shape[0])*self.eps)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tK_bb_inv_2 = self.inv_routine(K_bb_2)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"\\t! Problem with Coupling Matrix inversion: let me try a little trick ! \")\n\t\t\t\t\t\tK_bb_inv_2 = self.inv_routine(K_bb_2 + np.eye(K_bb_2.shape[0])*self.eps)\n\n\t\t\t\t\t# K_bb_inv_1 = self.inv_routine(K_bb_1)\n\t\t\t\t\t# K_bb_inv_2 = self.inv_routine(K_bb_2)\n\n\t\t\t\t\tcl1 = np.dot(K_bb_inv_1, np.dot(self.P_bl, pcl_1))\n\t\t\t\t\tcl2 = np.dot(K_bb_inv_2, np.dot(self.P_bl, pcl_2))\n\n\n\t\t\t\telse:\n\t\t\t\t\tweight_1 = np.ones(self.lmax+1)\n\t\t\t\t\tweight_2 = np.ones(self.lmax+1)\n\n\t\t\t\t\tif self.pixwin:\n\t\t\t\t\t\tweight_1 *= self.pw2_l\n\t\t\t\t\t\tweight_2 *= self.pw2_l\n\t\t\t\t\tif self.fwhm_smooth is not None:\n\t\t\t\t\t\tweight_1 *= np.diag(B2_1_ll)\n\t\t\t\t\t\tweight_2 *= np.diag(B2_2_ll)\n\n\t\t\t\t\tcl1 = np.dot(self.P_bl, pcl_1/weight_1) / self.fsky\n\t\t\t\t\tcl2 = np.dot(self.P_bl, pcl_2/weight_2) / self.fsky\n\n\t\t\t\tcl_err = np.sqrt(2./((2. 
* self.lb + 1) * self.delta_ell * self.fsky) * (cl**2 + cl1 * cl2))\n\n\t\t\treturn cl, cl_err\n\t\telse:\n\t\t\treturn cl", "def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def wavematch(w, wp, sl, wlimit=10):\n\n # first remove anything already in the self.wp from the sl list\n lines = []\n for x in sl:\n if x not in wp:\n lines.append(x)\n if not lines:\n return -1\n lines = np.array(lines)\n\n # find the best match\n dist = abs(lines - w)\n if dist.min() < wlimit:\n i = dist.argmin()\n else:\n return -1\n\n # return the values\n return lines[i]", "def rebin_spectra(source):\n # Unpack arguments using the 'source' label; all are assumed to be in km/s\n v_lo, v_hi, new_dv = velocity_ranges[source]\n # Load data cube\n data_filename = os.path.abspath(data_filepaths[source])\n cube = SpectralCube.read(data_filename)\n # Check units\n try:\n # See if the cube can be converted to Kelvins easily\n cube = cube.to(u.K)\n except:\n # Check if it looks like a temperature\n old_bunit = cube.header['BUNIT']\n if \"K (Ta*)\" in old_bunit:\n cube._unit = u.K\n print(f\"Data unit {cube.unit} assigned, based on the header BUNIT {old_bunit}.\")\n else:\n # Don't bother trying to fix it, leave it alone\n print(f\"Data units <{cube._unit}> aren't equivalent to Kelvins, leaving them alone\")\n # Get current channel width (np.diff should return an array of all the same value, so np.mean is overkill but it doesn't matter)\n old_dv = np.mean(np.diff(cube.spectral_axis))\n # Construct a box filter to average the channels\n # Filter width is number of channels; if rebinning from 0.1 km/s to 1 km/s, filter is 10 channels\n # Need to add km/s units to new_dv (old_dv already has units)\n filter_width = np.abs(((new_dv*u.km/u.s) / old_dv).decompose().to_value())\n # Round to nearest integer\n filter_width = np.around(filter_width, 0)\n # Make filter using astropy.convolution.Box1DKernel\n filter = Box1DKernel(filter_width)\n # Define the new spectral axis using the inclusive limits and the new channel width\n new_spectral_axis = np.arange(v_lo, v_hi+new_dv, new_dv) * u.km/u.s\n\n # Do the computationally intensive work\n print(\"Starting spectral smooth\")\n cube = cube.spectral_smooth(filter)\n print(\"Finished 
spectral smooth. Starting spectral rebin.\")\n cube = cube.spectral_interpolate(new_spectral_axis)\n print(\"Finished spectral rebin.\")\n # Create savename with \"rebin\" and the channel width inserted before the filetype suffix\n save_filename = data_filename.replace(\".fits\", f\".rebin{new_dv:d}kms.fits\")\n cube.write(save_filename, format='fits')", "def reinterp(self, lamb):\n mean, samples = self._get_mean_and_samples_attribute('reinterp')\n mean_val = mean(lamb)\n samp_val = [sk(mean_val.wavelength) for sk in samples]\n samp_transmissions = [sk.transmit for sk in samp_val]\n\n return self.__class__(mean_val.wavelength, mean_val.transmit,\n samp_transmissions, name=self.name,\n dtype=mean_val.dtype,\n unit=mean_val.wavelength_unit)", "def integrate(self, *w):\n # deal with subspectrums if necessary\n if not w:\n spectrum = self.spectrum\n interp = self.interp\n else:\n assert len(w) >= 2 and w[0] < w[\n 1], 'Error: Too few wavelengths or start wavelength is not shorter than the longest wavelength.'\n spectrum = self.sub_spectrum(w[0], w[1])\n # TODO: Decide if to use quad and interp1d obj for integration or not. trapz is faster & close in result\n # interp = interpolate.interp1d(spectrum[:, 0], spectrum[:, 1])\n # get the total number of discrete wavelengths as a bin limit\n # bin_limit = len(spectrum[:, 0])\n # integrate the power\n # power_f = integrate.quad(interp, spectrum[0, 0], spectrum[-1, 0], full_output=1, limit=bin_limit)\n power_f = integrate.trapz(spectrum[:, 1], spectrum[:, 0])\n return power_f # Units Watts/meters^2", "def Nishiyama09(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def FindNormalizationWeights(wavelength,\n wave_resol,\n alpha_resol,\n beta_resol):\n alpha_weight = 1.0\n beta_weight = 1.0\n lambda_weight = 1.0\n\n # alpha psf weighting\n alpha_wave_cutoff = alpha_resol[0]\n alpha_a_short = alpha_resol[1]\n alpha_b_short = alpha_resol[2]\n alpha_a_long = alpha_resol[3]\n alpha_b_long = alpha_resol[4]\n if wavelength < alpha_wave_cutoff:\n alpha_weight = alpha_a_short + alpha_b_short * wavelength\n else:\n alpha_weight = alpha_a_long + alpha_b_long * wavelength\n\n # beta psf weighting\n beta_wave_cutoff = beta_resol[0]\n beta_a_short = beta_resol[1]\n beta_b_short = beta_resol[2]\n beta_a_long = beta_resol[3]\n beta_b_long = beta_resol[4]\n if wavelength < beta_wave_cutoff:\n beta_weight = beta_a_short + beta_b_short * wavelength\n else:\n beta_weight = beta_a_long + beta_b_long * wavelength\n\n # wavelength weighting\n wavecenter = wave_resol[0]\n a_ave = wave_resol[1]\n b_ave = wave_resol[2]\n c_ave = wave_resol[3]\n wave_diff = wavelength - 
wavecenter\n resolution = a_ave + b_ave * wave_diff + c_ave * wave_diff * wave_diff\n lambda_weight = wavelength / resolution\n weight = [alpha_weight, beta_weight, lambda_weight]\n return weight", "def value_at_wavelength(self, *wavelengths: float):\n self.__bounds_check(*wavelengths)\n for w in wavelengths:\n irradiance = float(self.interp(w))\n yield irradiance", "def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")", "def calc_spectra(stream, data_type):\n \n import numpy as np\n from mtspec import mtspec\n from scipy import interpolate\n from scipy.stats import binned_statistic \n\n # Read in file \n tr = stream[0]\n data = tr.data\n delta = tr.stats.delta\n samprate = tr.stats.sampling_rate\n npts = tr.stats.npts\n \n # Determine nyquist frequency\n nyquist = 0.5 * samprate\n \n\n # Calc spectra amplitudes and frequencies \n # Switched number of tapers from 7 to 5. Decreases computation time and\n # results are similar\n amp_squared, freq = mtspec(data, delta=delta, time_bandwidth=4, \n number_of_tapers=5, nfft=npts, quadratic=True)\n \n # Convert from power spectra to amplitude spectra\n amp = np.sqrt(amp_squared)\n \n # Use scipy interpolate function to fill in data in missing bins\n f = interpolate.interp1d(freq, amp)\n freq_new = np.arange(np.min(freq), np.max(freq), 0.0001)\n amp_new = f(freq_new)\n\n # Remove certain frequencies that are too low or high. \n indexes = []\n \n for i, val in enumerate(freq_new):\n \n # Remove frequencies below 1/2 length of record\n if val <= 1/(delta*npts*0.5) :\n indexes.append(i)\n \n # Remove frequencies above 10 Hz for sm data because of the way it was processed \n elif val > 10 and data_type == 'sm':\n indexes.append(i)\n\n # Remove frequencies above nyquist frequency for disp data\n # (it's already removed in the previous step for sm data)\n elif val > nyquist and data_type == 'disp': \n indexes.append(i)\n \n # Remove any duplicate indexes\n indexes = np.unique(indexes)\n freq_new = np.delete(freq_new,indexes)\n amp_new = np.delete(amp_new,indexes) \n \n # Set up bins\n if data_type == 'sm':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 10 Hz because after that the sm data is unusable due to how it was\n # processed. 
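\n        # np.logspace returns log-uniform edges: num=21 edges make 20 bins, each a\n        # constant factor of (10/0.004)**(1/20) ~ 1.48 wider than the previous one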
\n bins = np.logspace(np.log10(0.004), np.log10(10), num=21)\n \n elif data_type == 'disp':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 0.5 Hz because that is the nyquist frequency .\n bins = np.logspace(np.log10(0.004), np.log10(0.5), num=21)\n \n bin_means, bin_edges, binnumber = binned_statistic(freq_new,\n amp_new,\n statistic='mean',\n bins=bins)\n \n # for i in range(len(bin_means)):\n # bin_means[i] = 10**bin_means[i]\n \n \n return(bin_means, freq, amp)", "def search_peaks(wavelength, flux, smooth_points=20, lmin=0, lmax=0, fmin=0.5, fmax=3., \n emission_line_file=\"lineas_c89_python.dat\", brightest_line=\"Ha\", cut=1.2, \n check_redshift = 0.0003, only_id_lines=True, plot=True, verbose=True, fig_size=12): \n # Setup wavelength limits\n if lmin == 0 :\n lmin = np.nanmin(wavelength)\n if lmax == 0 :\n lmax = np.nanmax(wavelength)\n \n # Fit a smooth continuum\n #smooth_points = 20 # Points in the interval\n step = np.int(len(wavelength)/smooth_points) # step\n w_cont_smooth = np.zeros(smooth_points) \n f_cont_smooth = np.zeros(smooth_points) \n\n for j in range(smooth_points):\n w_cont_smooth[j] = np.nanmedian([wavelength[i] for i in range(len(wavelength)) if (i > step*j and i<step*(j+1))])\n f_cont_smooth[j] = np.nanmedian([flux[i] for i in range(len(wavelength)) if (i > step*j and i<step*(j+1))]) # / np.nanmedian(spectrum)\n #print j,w_cont_smooth[j], f_cont_smooth[j]\n\n interpolated_continuum_smooth = interpolate.splrep(w_cont_smooth, f_cont_smooth, s=0)\n interpolated_continuum = interpolate.splev(wavelength, interpolated_continuum_smooth, der=0)\n\n\n funcion = flux/interpolated_continuum\n \n # Searching for peaks using cut = 1.2 by default\n peaks = []\n index_low = 0\n for i in range(len(wavelength)):\n if funcion[i] > cut and funcion[i-1] < cut :\n index_low = i\n if funcion[i] < cut and funcion[i-1] > cut :\n index_high = i\n if index_high != 0 :\n pfun = np.nanmax([funcion[j] for j in range(len(wavelength)) if (j > index_low and j<index_high+1 )])\n peak = wavelength[funcion.tolist().index(pfun)]\n if (index_high - index_low) > 1 :\n peaks.append(peak)\n \n # Identify lines\n # Read file with data of emission lines: \n # 6300.30 [OI] -0.263 15 5 5 15\n # el_center el_name el_fnl lowlow lowhigh highlow highigh \n # Only el_center and el_name are needed\n el_center,el_name,el_fnl,el_lowlow,el_lowhigh,el_highlow,el_highhigh = read_table(emission_line_file, [\"f\", \"s\", \"f\", \"f\", \"f\", \"f\", \"f\"] )\n #for i in range(len(el_name)):\n # print \" %8.2f %9s %6.3f %4.1f %4.1f %4.1f %4.1f\" % (el_center[i],el_name[i],el_fnl[i],el_lowlow[i], el_lowhigh[i], el_highlow[i], el_highhigh[i])\n #el_center,el_name = read_table(\"lineas_c89_python.dat\", [\"f\", \"s\"] )\n\n # In case this is needed in the future...\n# el_center = [6300.30, 6312.10, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7318.39, 7329.66]\n# el_fnl = [-0.263, -0.264, -0.271, -0.296, -0.298, -0.300, -0.313, -0.318, -0.320, -0.364, -0.374, -0.398, -0.400 ]\n# el_name = [\"[OI]\", \"[SIII]\", \"[OI]\", \"[NII]\", \"Ha\", \"[NII]\", \"HeI\", \"[SII]\", \"[SII]\", \"HeI\", \"[ArIII]\", \"[OII]\", \"[OII]\" ]\n\n # Search for the brightest line in given spectrum (\"Ha\" by default)\n peaks_flux = np.zeros(len(peaks))\n for i in range(len(peaks)):\n peaks_flux[i] = flux[wavelength.tolist().index(peaks[i])]\n Ha_w_obs = peaks[peaks_flux.tolist().index(np.nanmax(peaks_flux))] \n \n 
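# e.g. a brightest peak at 6678.2 A against a rest value of 6562.82 A gives\n    # z = 6678.2/6562.82 - 1 ~ 0.0176 through the estimate below\n    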
# Estimate redshift of the brightest line ( Halpha line by default)\n Ha_index_list = el_name.tolist().index(brightest_line)\n Ha_w_rest = el_center[Ha_index_list]\n Ha_redshift = (Ha_w_obs-Ha_w_rest)/Ha_w_rest\n if verbose: print(\"\\n> Detected %i emission lines using %8s at %8.2f A as brightest line!!\\n\" % (len(peaks),brightest_line, Ha_w_rest)) \n# if verbose: print \" Using %8s at %8.2f A as brightest line --> Found in %8.2f with a redshift %.6f \" % (brightest_line, Ha_w_rest, Ha_w_obs, Ha_redshift)\n \n # Identify lines using brightest line (Halpha by default) as reference. \n # If abs(wavelength) > 2.5 we don't consider it identified.\n peaks_name = [None] * len(peaks)\n peaks_rest = np.zeros(len(peaks))\n peaks_redshift = np.zeros(len(peaks))\n peaks_lowlow = np.zeros(len(peaks)) \n peaks_lowhigh = np.zeros(len(peaks))\n peaks_highlow = np.zeros(len(peaks))\n peaks_highhigh = np.zeros(len(peaks))\n\n for i in range(len(peaks)):\n minimo_w = np.abs(peaks[i]/(1+Ha_redshift)-el_center)\n if np.nanmin(minimo_w) < 2.5:\n indice = minimo_w.tolist().index(np.nanmin(minimo_w))\n peaks_name[i]=el_name[indice]\n peaks_rest[i]=el_center[indice]\n peaks_redshift[i] = (peaks[i]-el_center[indice])/el_center[indice]\n peaks_lowlow[i] = el_lowlow[indice]\n peaks_lowhigh[i] = el_lowhigh[indice]\n peaks_highlow[i] = el_highlow[indice]\n peaks_highhigh[i] = el_highhigh[indice]\n if verbose: print(\"%9s %8.2f found in %8.2f at z=%.6f |z-zref| = %.6f\" % (peaks_name[i], peaks_rest[i],peaks[i], peaks_redshift[i],np.abs(peaks_redshift[i]- Ha_redshift) ))\n #print peaks_lowlow[i],peaks_lowhigh[i],peaks_highlow[i],peaks_highhigh[i]\n # Check if all redshifts are similar, assuming check_redshift = 0.0003 by default\n # If OK, add id_peaks[i]=1, if not, id_peaks[i]=0 \n id_peaks=[]\n for i in range(len(peaks_redshift)):\n if np.abs(peaks_redshift[i]-Ha_redshift) > check_redshift:\n if verbose: print(\" WARNING!!! 
Line %8s in w = %.2f has redshift z=%.6f, different than zref=%.6f\" %(peaks_name[i],peaks[i],peaks_redshift[i], Ha_redshift))\n id_peaks.append(0)\n else:\n id_peaks.append(1)\n\n if plot:\n plt.figure(figsize=(fig_size, fig_size/2.5)) \n plt.plot(wavelength, funcion, \"r\", lw=1, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"Flux / continuum\")\n \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.axhline(y=cut, color='k', linestyle=':', alpha=0.5) \n for i in range(len(peaks)):\n plt.axvline(x=peaks[i], color='k', linestyle=':', alpha=0.5)\n label=peaks_name[i]\n plt.text(peaks[i], 1.8, label) \n plt.show() \n \n continuum_limits = [peaks_lowlow, peaks_lowhigh, peaks_highlow, peaks_highhigh]\n \n if only_id_lines:\n peaks_r=[]\n peaks_name_r=[]\n peaks_rest_r=[]\n peaks_lowlow_r=[]\n peaks_lowhigh_r=[]\n peaks_highlow_r=[]\n peaks_highhigh_r=[]\n \n for i in range(len(peaks)): \n if id_peaks[i] == 1:\n peaks_r.append(peaks[i])\n peaks_name_r.append(peaks_name[i])\n peaks_rest_r.append(peaks_rest[i])\n peaks_lowlow_r.append(peaks_lowlow[i])\n peaks_lowhigh_r.append(peaks_lowhigh[i])\n peaks_highlow_r.append(peaks_highlow[i])\n peaks_highhigh_r.append(peaks_highhigh[i])\n continuum_limits_r=[peaks_lowlow_r,peaks_lowhigh_r,peaks_highlow_r,peaks_highhigh_r] \n\n return peaks_r, peaks_name_r , peaks_rest_r, continuum_limits_r \n else: \n return peaks, peaks_name , peaks_rest, continuum_limits", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n 
ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def interp_mmw(files, nelements=10000, window_function=None, cutoff=None):\n full_freq = list()\n full_int = list()\n \n print(\"Parsing files.\")\n for index, file in enumerate(files):\n frequency, intensity, settings = parse_data(file)\n intensity = fft_routines.fft_filter(intensity, window_function, cutoff)\n if index == 0:\n minx = np.min(frequency)\n maxx = np.max(frequency)\n else:\n curr_minx = np.min(frequency)\n curr_maxx = np.max(frequency)\n if curr_minx < minx:\n minx = curr_minx\n if curr_maxx > maxx:\n maxx = curr_maxx\n full_freq.append(frequency)\n full_int.append(intensity)\n \"\"\"\n Stitching the spectrum together using a weighted\n Shepard interpolation.\n \"\"\"\n print(\"Lowest frequency: \" + str(minx))\n print(\"Highest frequency: \" + str(maxx))\n print(\"Performing Shepard interpolation\")\n frequency = np.linspace(minx, maxx, nelements)\n interp_y = np.zeros(nelements)\n \n # Loop over each frequency bin\n for index, interp_freq in enumerate(frequency):\n if (index / len(frequency)) > 0.5:\n print(\"50% done.\")\n # Calculate the Shepard interpolation at the given frequency point\n interp_y[index] = interpolation.eval_shep_interp(full_freq, full_int, interp_freq, p=16.)\n \n df = pd.DataFrame(data=list(zip(frequency, interp_y)), columns=[\"Frequency\", \"Intensity\"])\n return df", "def radiance_map(file, config, vmax=4200, levels=20, typ=''):\n \n # Select data from configuration \n azimuths = config['skymap'][:, 0] # +180 # azimuths\n zeniths = config['skymap'][:, 1] # zeniths\n\n if typ == 'sim':\n # look for wavelength index in array\n waves_sim = dataset.attrs['simulated_Columns'].split('nm')[0].split('[')[1].split(\n ']')[0].split(',')\n waves = np.asarray(list(map(int, waves_sim)))\n wave_indx = np.where(waves == wave)\n try:\n wave_indx = np.int(wave_indx[0][0])\n except:\n print(\"Wavelength is not in dataset\")\n z = dataset.simulated[:, wave_indx, time_indx]\n\n elif typ == 'meas':\n wave_indx = int((config['wavelength'] - 250 - config['wave_correction']) / 0.446)\n with h5py.File(file, 'r') as data:\n z = data['data'][:, wave_indx]\n else:\n print('Select a input data type(sim or meas)')\n\n # Add values in the origin to close the surface interpolation\n azimuths = np.append(azimuths, [270, 0, 0, 0, 0, 0, 0, 0])\n zeniths = np.append(zeniths, [0, 12, 24, 36, 48, 60, 72, 84])\n z = np.append(z, [z[0], z[3], z[9], z[19], z[33], z[51], z[73], z[99]])\n\n # Convert x to radians\n azimuths = np.radians(azimuths)\n zeniths = np.radians(zeniths)\n\n # Remove dead channels of the dataset\n azimuths = np.delete(azimuths, config['dead_fibre'])\n zeniths = np.delete(zeniths, config['dead_fibre'])\n z = np.delete(z, config['dead_fibre'])\n\n # Set up a regular grid of interpolation point\n thetai, ri = np.linspace(azimuths.min(), azimuths.max(),\n num=len(azimuths)), \\\n np.linspace(zeniths.min(), zeniths.max(), num=len(zeniths))\n\n ri, thetai = np.meshgrid(ri, thetai, indexing='ij')\n\n # zi = scipy.interpolate.griddata((azimuths, zeniths), z, (thetai, ri),\n # method='linear')\n\n rbf = scipy.interpolate.Rbf(azimuths, zeniths, z, fucntion='gaussian',\n 
epsilon=0.05)\n\n ZI = rbf(thetai, ri)\n\n if typ == 'sim':\n name = str(dataset.time[time_indx].values) # ''\n else:\n name = 'testing' #str(dataset.time[time_indx].values)\n\n # Create the directory to save the results\n # os.makedirs(os.path.dirname(config['path_note'] + '/figures/'),\n # exist_ok=True)\n if vmax == 'default':\n vmax = 4200\n else:\n vmax = vmax\n\n # Plot the dataset\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n cmap = 'Spectral_r' # 'rainbow'\n a = plt.contourf(thetai, ri, ZI, levels, cmap=cmap, vmin=0,\n vmax=vmax) # , vmax=4932)\n plt.title('{} UTC {}nm'.format(name, config['wavelength']))\n plt.axis([0, 2*np.pi, 0, 1.48])\n\n plt.scatter(azimuths, zeniths, cmap=cmap, s=1)\n ax.grid(False)\n ax.set_theta_zero_location(\"N\") # Set the direction of polar plot\n ax.set_theta_direction(1) # Set the increase direction on azimuth angles\n # (-1 to clockwise, 1 counterclockwise)\n cbar = plt.colorbar(a)\n cbar.set_label(\"counts\", rotation=90)\n\n # if typ == 'sim':\n # plt.savefig(\n # 'figures/skymap/simulated/skymap{}nm_{}UTC_sim.jpeg'.format(wave,\n # name),\n # dpi=300)\n # plt.show();\n # else:\n # plt.savefig(\n # 'figures/skymap/measured/skymap{}nm_{}UTC_meas.jpeg'.format(wave,\n # name),\n # dpi=300)", "def reinterp(self, lamb):\n _wavelength = self._get_filter_in_units_of(lamb)\n _lamb = _drop_units(lamb)\n try:\n _unit = str(lamb.unit)\n except Exception:\n _unit = self.wavelength_unit\n ifT = np.interp(_lamb, _wavelength, self.transmit, left=0., right=0.)\n return self.__class__(_lamb, ifT, name=self.name, dtype=self.dtype,\n unit=_unit)", "def plot_wavelength_vs_distance(solver, WL_range=[400, 1350], ax=None,\n norm=None):\n\n if ax is None:\n ax = plt.gca()\n\n if norm is None:\n norm = np.max(np.abs(solver.AW)**2)\n\n lIW = np.fliplr(\n 10 * np.log10(np.abs(solver.AW)**2 / norm,\n where=(np.abs(solver.AW)**2 > 0)))\n WL = 2 * np.pi * c / solver.W # wavelength grid\n WL_asc = np.flip(WL, ) # ascending order for interpolation\n iis = np.logical_and(WL_asc > WL_range[0],\n WL_asc < WL_range[1]) # indices of interest\n\n WL_asc = WL_asc[iis]\n lIW = lIW[:, iis]\n\n interpolator = interp2d(WL_asc, solver.Z, lIW)\n newWL = np.linspace(np.min(WL_asc), np.max(WL_asc), lIW.shape[1])\n toshow = interpolator(newWL, solver.Z)\n\n ax.imshow(toshow, origin='lower', aspect='auto', cmap=\"magma\",\n extent=[np.min(WL_asc), np.max(WL_asc), 0, np.max(solver.Z)],\n vmin=-40)\n ax.set_xlabel(\"Wavelength [nm]\")\n ax.set_ylabel(\"Distance [m]\")\n return ax", "def _lookup_bands(platform, wavelengths):\r\n wave_bands = {\r\n Platform.Landsat5: {\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"nir\": \"4\",\r\n \"swir1\": \"5\",\r\n \"tirs\": \"6\",\r\n \"swir2\": \"7\",\r\n },\r\n Platform.Landsat7: {\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"nir\": \"4\",\r\n \"swir1\": \"5\",\r\n \"tirs1\": \"6_VCID_1\",\r\n \"tirs2\": \"6_VCID_2\",\r\n \"swir2\": \"7\",\r\n \"pan\": \"8\",\r\n },\r\n Platform.Landsat8: {\r\n \"aerosol\": \"1\",\r\n \"blue\": \"2\",\r\n \"green\": \"3\",\r\n \"red\": \"4\",\r\n \"nir\": \"5\",\r\n \"swir1\": \"6\",\r\n \"swir2\": \"7\",\r\n \"pan\": \"8\",\r\n \"cirrus\": \"9\",\r\n \"tirs1\": \"10\",\r\n \"tirs2\": \"11\",\r\n },\r\n Platform.Sentinel2: {\r\n \"aerosol\": \"0\",\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"rededge1\": \"4\",\r\n \"rededge2\": \"5\",\r\n \"rededge3\": \"6\",\r\n \"nir\": \"7\",\r\n \"rededge4\": \"8\",\r\n \"watervapor\": \"9\",\r\n 
\"cirrus\": \"10\",\r\n \"swir1\": \"11\",\r\n \"swir2\": \"12\",\r\n },\r\n }\r\n\r\n return [wave_bands[platform][wavelength.lower()] for wavelength in wavelengths]", "def radial_velocity(wv_obj, fx_obj, sig_obj, wv_std, fx_std, sig_std, obj_name, std_name, rv_std, rv_std_err, order,\n xcorr_width, cut, cutstart, cutend):\n\n # The more random iterations, the better... but it takes longer\n n_iter = 1000\n\n # Step 1: Fix the spectra:\n # * Select only the region in which they overlap\n # * Make a new stretched wavelength array (for sub-pixel precision work)\n # * Interpolate the data onto the new wavelength array\n # * Remove large scale slopes so we only compare line and band features\n\n # Find where standard and object overlap ---------------\n wv_min = max([min(wv_std), min(wv_obj)])\n wv_max = min([max(wv_std), max(wv_obj)])\n\n n_pix_std = len(wv_std)\n\n # Creates ln standard wavelength array ---------------------------------\n # AR 2013.0423 The wavelength array only covers the overlap region. Also, I'm folding the rebinning by 10 into this statement.\n acoef_std = (n_pix_std * 10 - 1) / (math.log(wv_max) - math.log(wv_min))\n bcoef_std = (n_pix_std * 10) - (acoef_std * math.log(wv_max))\n\n arr = np.arange(n_pix_std * 10) + 1\n wv_ln_std = np.exp((arr - bcoef_std) / acoef_std)\n\n # AR 2012.1018: Find the conversion between pixels and velocity. This will vary from instrument\n # to instrument and spectral order to spectral order, so we should preferentially calculate this\n # based on the actual input spectrum.\n # AR 2013.0422: Change the calculation to happen AFTER the corrected wavelength scale has been made\n # Find the average pixel/spectrum offset\n # Note: even though it's called micron_per_pix, it will still work if the wavelengths are\n # angstroms instead (it really converts <wavelength unit> to km/s)\n\n # Interpolate data onto same ln wavelength scale -------------------------------\n\n fx_interp_std = np.interp(wv_ln_std, wv_std, fx_std)\n fx_interp_obj = np.interp(wv_ln_std, wv_obj, fx_obj)\n sig_interp_std = np.interp(wv_ln_std, wv_std, sig_std) # AR 2012.1018 Also need to rebin sig\n sig_interp_obj = np.interp(wv_ln_std, wv_obj, sig_obj) # AR 2012.1018 Also need to rebin sig\n\n # Rebin Data ----------------------------\n\n wv_arr_std = np.asarray(wv_ln_std, dtype=float)\n fx_arr_obj = np.asarray(fx_interp_obj, dtype=float)\n fx_arr_std = np.asarray(fx_interp_std, dtype=float)\n sig_arr_obj = np.asarray(sig_interp_obj, dtype=float)\n sig_arr_std = np.asarray(sig_interp_std, dtype=float)\n\n datalen = len(fx_arr_obj)\n\n # Step 2: Measure vsini:\n # Note that as of 2015.0605, this doesn't actually work.\n\n # AR 2014.0922: For vsini:\n # In a loop:\n # Take the standard spectrum\n # broaden it to width X\n # autocorrelate,\n # measure width of gaussian Y (this is supposed to give you a means of translating between width-of-cross-correlation and vsini)\n # Fit function solving Y for X.\n # For each cross correlation of object and standard:\n # Determine vsini\n\n pix_scale = (2.99792458 * 10 ** 5) / acoef_std\n\n # vsinirange = [1,2,5,10,20,30,40,50,60,80,100,100]\n # widthrange = []\n # for v in vsinirange:\n # # Make convolution kernel for v km/s\n # kernel = lsf_rotate(pix_scale,v)\n # # Broaden the standard spectrum\n # fx_obj_wide = np.correlate(fx_arr_obj, kernel, mode='same')\n # # Rectify the spectrum\n # fx_obj_orig = (fx_arr_obj - np.mean(fx_arr_obj))/np.std(fx_arr_obj,ddof=1)\n # fx_obj_wide = (fx_obj_wide - 
np.mean(fx_obj_wide))/np.std(fx_obj_wide,ddof=1)\n #\n # # Remove a cubic (flatten the spectrum)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_wide)\n # fx_obj_wide = fx_obj_wide - (coeff[0] + coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_orig)\n # fx_obj_orig = fx_obj_orig - (coeff[0] + coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n #\n # # Cross-correlate the spectrum with its broadened self\n # ycorr = np.correlate(fx_obj_orig, fx_obj_wide, mode='full')\n # # Now determine where the peak is (should be near 0)\n # length = len(ycorr)\n # xcorr = np.arange(length) - length//2\n # xmid = np.argmax(ycorr)\n # ymax = np.max(ycorr)\n # # Chop out just the portion of the array near the peak\n # xcorr_min=xmid-xcorr_width\n # xcorr_max=xmid+xcorr_width\n # ycorr1=ycorr[xcorr_min:xcorr_max]\t#isolate section of array with gaussian\n # xcorr1=xcorr[xcorr_min:xcorr_max] #isolate the same section of the pixel range\n #\n # # set up initial values for gaussian fitting via chi2\n # sig = 10\n # sky = np.min(ycorr1)/1.2\n # # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n # sky2 = (ycorr1[-1]-ycorr1[0])/(xcorr1[-1]-xcorr1[0])\n # lnamp = np.log(ymax/1.2-sky)\t# guess some values\n # mean = xcorr[xmid]\n #\n # amp = np.exp(lnamp)\n # sig2 = sig**2\n # # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # # suggestion from D. Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n # def chi2(p):\t#define gaussian function for fitting\n # sig2=p[2] ** 2\n # m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4]*xcorr1\n # return (ycorr1 - m)\n #\n # # Fit the gaussian.\n # popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n # lnamp, mean, sig, sky, sky2 = popt\n #\n # amp = np.exp(lnamp)\n # # record the width\n # widthrange.append(sig)\n #\n # # Plot all the widths to get a width-vsini curve\n # vsinicoeff,popt = op.curve_fit(quartic,np.asarray(widthrange),np.asarray(vsinirange))\n #\n # relationx = np.arange(50,200,1)\n # relationy = vsinicoeff[0]+vsinicoeff[1]*relationx+vsinicoeff[2]*relationx**2+vsinicoeff[3]*relationx**3+vsinicoeff[4]*relationx**4\n # figv = plt.figure(1)\n # axv = figv.add_subplot(211)\n # axv.scatter(widthrange,vsinirange)\n # axv.plot(relationx,relationy)\n # #ax.text(70,100,\"{0:} {1:} {2:} {3:} {4:}\".format(vsinicoeff))\n\n # 3. 
Cross-correlate the data, using n_iter trials:\n # * Generate two random gaussian noises scaled to the uncertainty on the fluxes\n # * Apply those gaussian noises to the standard and target stars\n # * Cross-correlate the standard and target stars\n # * Find and then cut out just the part of the cross-correlation curve near the maximum\n # * Set up gaussian\n # * Fit gaussian to that center part\n # * Save fitted parameters (pixel shift aka mean of gaussian, width aka stddev of gaussian)\n # * Repeat n_iter times\n\n # Cross correlation loop --------------------------------\n pix_shift = np.array([]) # initialize array for pixel shift values\n pix_width = np.zeros(n_iter) # initialize array for pixel width values\n l = 0\n\n # using the xrange generator rather than making a full list saves memory\n while len(pix_shift) < n_iter:\n # prepare the randomized data\n # GETTING ARRAYS READY FOR CROSS CORRELATION\n\n\n # Randomize noise:\n # create gaussian distribution of random numbers b/t 1 and -1, multiply err by numbers, add numbers to flux\n # I have drastically simplified the arrays here AR 2013.0319\n # AR 2013.0318: There was a problem, previously: noise was a fixed value, not linked to the known error values\n\n # AR 2013.0321: Speed fix. Rather than step through the array and generate one\n # normally-distributed error value scaled to the SNR at that point, I will generate an\n # array of normally-distributed error values scaled to 1, and then multiply by the SNR:\n # One array generation, one array multiplication.\n\n rand_dist = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n rand_dist2 = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n\n fx_temp_obj = np.asarray(fx_arr_obj + rand_dist * sig_arr_obj)\n fx_temp_std = np.asarray(fx_arr_std + rand_dist2 * sig_arr_std)\n mean_obj = np.mean(fx_temp_obj)\n mean_std = np.mean(fx_temp_std)\n stddev_obj = np.std(fx_temp_obj, ddof=1)\n stddev_std = np.std(fx_temp_std, ddof=1)\n\n # Regularize data (subtract mean, divide by std dev) (Should definitely be done AFTER noise was added)\n fx_reg_temp_obj = fx_temp_obj - mean_obj\n fx_reg_temp_obj = fx_reg_temp_obj / stddev_obj\n fx_reg_temp_std = fx_temp_std - mean_std\n fx_reg_temp_std = fx_reg_temp_std / stddev_std\n\n # curve fit - remove a cubic AR 2012.1113\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_obj)\n fx_reg_temp_obj = fx_reg_temp_obj - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_std)\n fx_reg_temp_std = fx_reg_temp_std - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n\n # CROSS CORRELATION\n\n # compute the cross-correlation between the two spectra\n\n ycorr = np.correlate(fx_reg_temp_obj, fx_reg_temp_std, mode='full')\n # time required: 0.045 seconds average\n\n # http://stackoverflow.com/questions/12323959/fast-cross-correlation-method-in-python\n # conv1 = np.zeros(datalen * 2)\n # conv1[datalen/2:datalen/2+datalen] = fx_reg_temp_obj\n # conv2 = fx_reg_temp_std[::-1]\n # ycorr = signal.fftconvolve(conv1,conv2, mode='valid')\n # time required: 0.006 seconds average, but it segfaults by the third try.\n\n ## slight smoothing AR 2013.0315\n # ycorr = scipy.ndimage.filters.gaussian_filter1d(ycorr,11)\n\n # create the x offset axis (same length as ycorr, with 0 in the MIDDLE)\n length = len(ycorr)\n xcorr = np.arange(length) - length // 2\n # AR 2012.1126 Select a tiny piece around the maximum to fit with 
a gaussian.\n xmid = np.argmax(ycorr)\n ymax = np.max(ycorr)\n # now take just the portion of the array that matters\n xcorr_min = int(xmid - xcorr_width)\n xcorr_max = int(xmid + xcorr_width)\n ycorr1 = ycorr[xcorr_min:xcorr_max] # isolate section of array with gaussian\n xcorr1 = xcorr[xcorr_min:xcorr_max] # isolate the same section of the pixel range\n ycorr2 = ycorr[xcorr_min - 50:xcorr_max + 50]\n xcorr2 = xcorr[xcorr_min - 50:xcorr_max + 50]\n\n # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # suggestion from D. Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n def chi2(p): # define gaussian function for fitting\n sig2 = p[2] ** 2\n m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4] * xcorr1\n return (ycorr1 - m)\n\n # set up initial values for chi2\n sig = 10\n sky = np.min(ycorr1) / 1.2\n # print(ycorr1[-1], ycorr1[0], xcorr1[-1], xcorr1[0])\n sky2 = (ycorr1[-1] - ycorr1[0]) / (xcorr1[-1] - xcorr1[0])\n lnamp = np.log(ymax / 1.2 - sky) # guess some values\n mean = xcorr[xmid]\n\n amp = np.exp(lnamp)\n sig2 = sig ** 2\n\n popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n lnamp, mean, sig, sky, sky2 = popt\n\n amp = np.exp(lnamp)\n\n # print_num=len(pix_shift)%100\n print_num = l % 100\n if print_num == 0:\n ## Uncomment the following to make a plot every 500 fits.\n # fig = plt.figure(l)\n # ax = fig.add_subplot(111)\n # my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mean) ** 2) / sig**2))) + sky + sky2 * xcorr1\n # ax.plot(xcorr1,my_gauss,'r--')\n # ax.plot(xcorr2,ycorr2,'#000000')\n # ax.plot(xcorr1,ycorr1-my_gauss,'#00CC00')\n ##if abs(mean - xcorr[xmid]) > 5:\n ## print(\"Mean is off\", mean, xcorr[xmid])\n # figname='rv_{0:}_{1:}_{2:}_{3:}.png'.format(std_name,obj_name,order,l)\n # ax.set_xlim(xcorr[xcorr_min-50],xcorr[xcorr_max+50])\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n print(\"amp={0: 12.4f} mu={1: 10.4f} sig={2: 9.4f} sky={3: 11.4f} sky2={4: 8.4f} n_entries={5:}\".format(\n amp, mean, sig, sky, sky2, len(pix_shift)))\n\n l += 1\n if (cut == 0) or ((mean > float(cutstart)) and (mean < float(cutend))):\n pix_shift = np.append(pix_shift, mean)\n # if ier < 5:\n # I'm calculating the vsini now because I need errors, and the vsini calculation is not linear.\n # pix_width[l] = vsinicoeff[0] + vsinicoeff[1] * sig + vsinicoeff[2] * sig**2 + vsinicoeff[3] * sig**3 + vsinicoeff[4] * sig**4\n\n # End cross correlation loop ---------------------------------\n\n # 4. Find the RV\n # All n_iter rv fits have been calculated and stored in arrays\n # 4a. Cut out outlier RVs. Useful if the cross-correlation produces occasional bad results. Use cutstart and cutend to force the code to only fit a gaussian to a certain region. Don't over-use this to force the result you want, though.\n # 4b. Compute the mean pixel shift and pixel shift uncertainty.\n # 4c. Convert pixel shift into RV\n # 4d. Shift the wavelength array appropriately - all lines should now line up.\n\n ## Uncomment this to print out an example cross-correlation diagram\n # fig = plt.figure(2)\n # ax = fig.add_subplot(111)\n # ax.plot(xcorr,ycorr,'k')\n # figname='rv_{0:}_{1:}_{2:}_xcorr.png'.format(std_name,obj_name,order)\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n\n # Turn the list of pixel shifts into a numpy array\n pix_shift = np.asarray(pix_shift)\n\n # 4a. Cut out outliers from the pixel shift\n if cut == 1:\n pix_shift = pix_shift[np.where((pix_shift > float(cutstart)) & (pix_shift < float(cutend)))]\n\n # 4b. Compute the mean pixel shift (rv value) and pixel shift uncertainty (RV uncertainty).\n\n print(l, len(pix_shift), float(len(pix_shift)) / float(n_iter) * 100.0)\n\n mu = np.mean(pix_shift)\n sigma = np.std(pix_shift, ddof=1)\n\n # vsini = np.mean(pix_width)\n # vsini_err = np.std(pix_width,ddof=1)\n\n # axh = figv.add_subplot(212)\n # n, bins, patches=axh.hist(pix_width,bins=30,normed=1.0,facecolor='green',align='mid')\n # figv.savefig('vsiniplot.png')\n # plt.clf()\n # plt.close()\n\n # 4c. Transform pixel shift to shift in radial velocity\n\n # AR 2013.0423: The actually appropriate method requires a speed-of-light correction. This works for both angstroms and microns.\n rv_meas = (2.99792458 * 10 ** 5 * mu) / acoef_std\n rv_meas_err = (2.99792458 * 10 ** 5 * sigma) / acoef_std\n\n # 4d. Apply shift to arrays\n wv_rvcorr_obj = wv_arr_std * (1 - rv_meas / (2.99792458 * 10 ** 5))\n\n ## 5. Create plots ---------------------------------\n # The plots are the only reason find_rv.py needs to know the names of either star, or the RV of the standard.\n\n # Plot object and standard so you can clearly see that shift exists --------------------------------\n fig = plt.figure(1)\n\n # AR 2013.0703 Regularize the spectra for display purposes in the final graph\n # I'm using the mean and stddev of the last random-added attempt so it won't be perfect...\n fx_reg_obj = fx_arr_obj - mean_obj\n fx_reg_obj = fx_reg_obj / stddev_obj\n fx_reg_std = fx_arr_std - mean_std\n fx_reg_std = fx_reg_std / stddev_std\n\n # Plots target and standard with shift applied\n ax1 = fig.add_subplot(311)\n ax1.plot(wv_rvcorr_obj, fx_reg_obj, 'red')\n ax1.plot(wv_arr_std, fx_reg_std, 'blue')\n ax1.set_xlabel('wavelength (microns)')\n ax1.set_ylabel('normalized flux')\n target = 'Target: %s' % (obj_name)\n standard = 'Standard: %s' % (std_name)\n ax1.annotate(target, xy=(.7, .9), xycoords='axes fraction', xytext=(.6, .9), textcoords='axes fraction',\n color='red')\n ax1.annotate(standard, xy=(.7, .8), xycoords='axes fraction', xytext=(.6, .8), textcoords='axes fraction',\n color='blue')\n\n sig2 = sig ** 2\n my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mu) ** 2) / sig2))) + sky + sky2 * xcorr1\n\n # Plots example of gaussian fit to cross correlation function\n ax2 = fig.add_subplot(312)\n ax2.plot(xcorr1, ycorr1, 'k.')\n ax2.plot(xcorr1, my_gauss, 'r--', linewidth=2)\n ax2.plot(xcorr1, ycorr1 - my_gauss, '#00CC00')\n ax2.set_xlabel('example of fit to cross correlation function')\n ax2.set_xlim(xcorr[xcorr_min - 50], xcorr[xcorr_max + 50])\n # print(pix_shift)\n\n\n ## Plot histogram of pixel shift values --------------------------------\n ax3 = fig.add_subplot(313)\n n, bins, patches = plt.hist(pix_shift, bins=30, density=True, facecolor='green', align='mid')\n # Plot best fit gaussian over histogram\n y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))\n ax3.plot(bins, y, 'r--', linewidth=2)\n ax3.set_xlabel('radial velocity of target (pixels)')\n ax3.set_ylabel('frequency (normalized)')\n rad = 'RV = %.3f +/- %.3f' % (rv_meas, rv_meas_err)\n corr = 'RV (corr) = %.3f +/- %.3f' % (rv_std + rv_meas, (rv_std_err ** 2 + rv_meas_err ** 2) ** (0.5))\n # vsinistr = 'VsinI = %.3f +/- %.3f' % (vsini,vsini_err)\n ax3.annotate(rad, xy=(.66, .9), xycoords='axes fraction', xytext=(.66, .9), textcoords='axes fraction',\n color='black')\n ax3.annotate(corr, xy=(.6, .8), xycoords='axes 
fraction', xytext=(.60, .8), textcoords='axes fraction',\n color='black')\n # ax3.annotate(vsinistr,xy=(.6,.6),xycoords='axes fraction',xytext=(.60,.6),textcoords='axes fraction',color='black')\n ax3.annotate('{0:+5.2f} {1: 5.2f}'.format(mu, sigma), xy=(.05, .9), xycoords='axes fraction', xytext=(.05, .9),\n textcoords='axes fraction', color='black')\n ax3.annotate('{0:5.3f} km/s/pix'.format((2.99792458 * 10 ** 5) / acoef_std), xy=(.05, .8), xycoords='axes fraction',\n xytext=(.05, .8), textcoords='axes fraction', color='black')\n fig.subplots_adjust(hspace=.3)\n\n figname = 'rv_%s_%s_%d.png' % (std_name, obj_name, order)\n fig.savefig(figname)\n fig.clf()\n plt.close()\n\n # plt.figure(l+1)\n # plt.hist(pix_shift)\n\n # END RADIAL VELOCITY FUNCTION -----------------------------------------\n return rv_meas, rv_meas_err", "def resample(wavelength, spectra, resampling_ratio):\n\n new_length = int(np.round(wavelength.size * resampling_ratio))\n spectra_, wavelength_ = scipy.signal.resample(spectra, new_length, wavelength)\n return wavelength_, spectra_", "def Fitzpactrick09(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave", "def reduce_resolution(wi, fi, fwhm0=0.55, sigma_floor=0.2):\n\n # all in AA\n w_lick_res = (4000., 4400., 4900., 5400., 6000.)\n lick_res = (11.5, 9.2, 8.4, 8.4, 9.8) # FWHM in AA\n\n w = np.asarray(wi)\n flux = np.atleast_2d(fi)\n\n # Linear interpolation of lick_res over w\n # numpy interp does constant instead of extrapolation\n # res = np.interp(w, w_lick_res, lick_res)\n\n # spline order: 1 linear, 2 quadratic, 3 cubic ...\n from scipy.interpolate import InterpolatedUnivariateSpline\n res = InterpolatedUnivariateSpline(w_lick_res, lick_res, k=1)(w)\n\n # Compute width from fwhm\n const = 2. * np.sqrt(2. 
* np.log(2)) # conversion fwhm --> sigma\n lick_sigma = np.sqrt((res ** 2 - fwhm0 ** 2)) / const\n\n # Convolution by g=1/sqrt(2*pi*sigma^2) * exp(-r^2/(2*sigma^2))\n flux_red = np.zeros(flux.shape, dtype=flux.dtype)\n\n for i, sigma in enumerate(lick_sigma):\n maxsigma = 3. * sigma\n # sampling floor: min (0.2, sigma * 0.1)\n delta = min(sigma_floor, sigma * 0.1)\n delta_wj = np.arange(-maxsigma, + maxsigma, delta)\n wj = delta_wj + w[i]\n for k, fk in enumerate(flux):\n fluxj = np.interp(wj, w, fk, left=0., right=0.)\n flux_red[k, i] = np.sum(fluxj * delta * np.exp(-0.5 * (delta_wj / sigma) ** 2))\n\n flux_red /= lick_sigma * const\n\n return flux_red.reshape(np.shape(fi))", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def resample(self, new_dispersion, inplace=False, force=False):\n\n # Mapping of the SpecOneD object variables to the function\n # variables\n\n old_spec_wavs = self.dispersion\n spec_fluxes = self.flux\n if self.flux_err is not None:\n spec_errs = self.flux_err\n else:\n spec_errs = None\n\n new_spec_wavs = new_dispersion\n\n if force:\n indices = np.where((new_spec_wavs < old_spec_wavs.max()) &\n (new_spec_wavs > old_spec_wavs.min()))\n new_spec_wavs = new_spec_wavs[indices]\n\n # Arrays of left-hand sides and widths for the old and new bins\n spec_widths = np.zeros(old_spec_wavs.shape[0])\n spec_lhs = np.zeros(old_spec_wavs.shape[0])\n spec_lhs[0] = old_spec_wavs[0]\n spec_lhs[0] -= (old_spec_wavs[1] - old_spec_wavs[0]) / 2\n spec_widths[-1] = (old_spec_wavs[-1] - old_spec_wavs[-2])\n spec_lhs[1:] = (old_spec_wavs[1:] + old_spec_wavs[:-1]) / 2\n spec_widths[:-1] = spec_lhs[1:] - spec_lhs[:-1]\n\n filter_lhs = np.zeros(new_spec_wavs.shape[0] + 1)\n filter_widths = np.zeros(new_spec_wavs.shape[0])\n filter_lhs[0] = new_spec_wavs[0]\n filter_lhs[0] -= (new_spec_wavs[1] - new_spec_wavs[0]) / 2\n filter_widths[-1] = (new_spec_wavs[-1] - new_spec_wavs[-2])\n filter_lhs[-1] = new_spec_wavs[-1]\n filter_lhs[-1] += (new_spec_wavs[-1] - new_spec_wavs[-2]) / 2\n filter_lhs[1:-1] = (new_spec_wavs[1:] + new_spec_wavs[:-1]) / 2\n filter_widths[:-1] = filter_lhs[1:-1] - filter_lhs[:-2]\n\n if filter_lhs[0] < spec_lhs[0] or filter_lhs[-1] > spec_lhs[-1]:\n\n raise ValueError(\"spectres: The new wavelengths specified must fall\"\n \"within the range of the old wavelength values:\",\n filter_lhs[0], spec_lhs[0], filter_lhs[-1],\n spec_lhs[-1], \"\\n Consider setting force=True\")\n\n # Generate output arrays to be populated\n res_fluxes = np.zeros(spec_fluxes[..., 0].shape + new_spec_wavs.shape)\n\n if spec_errs is not None:\n if spec_errs.shape != spec_fluxes.shape:\n raise ValueError(\n \"If specified, spec_errs must be the same shape\"\n \"as spec_fluxes.\")\n else:\n res_fluxerrs = np.copy(res_fluxes)\n\n start = 0\n stop = 0\n\n # Calculate new flux and uncertainty values, loop over new bins\n for j in range(new_spec_wavs.shape[0]):\n\n # Find first old bin which is partially covered by the new bin\n while spec_lhs[start + 1] <= filter_lhs[j]:\n start += 1\n\n # Find last old bin which is partially covered by the new bin\n while spec_lhs[stop + 1] < filter_lhs[j + 1]:\n stop += 1\n\n # If new bin is fully within one old bin these are the same\n if stop == start:\n\n res_fluxes[..., j] = spec_fluxes[..., start]\n if spec_errs is not None:\n res_fluxerrs[..., j] = spec_errs[..., start]\n\n # Otherwise multiply the first and last old bin widths by P_ij\n 
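# (start_factor and end_factor below are those fractional-overlap weights)\n            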
else:\n\n start_factor = ((spec_lhs[start + 1] - filter_lhs[j])\n / (spec_lhs[start + 1] - spec_lhs[start]))\n\n end_factor = ((filter_lhs[j + 1] - spec_lhs[stop])\n / (spec_lhs[stop + 1] - spec_lhs[stop]))\n\n spec_widths[start] *= start_factor\n spec_widths[stop] *= end_factor\n\n # Populate res_fluxes spectrum and uncertainty arrays\n f_widths = spec_widths[start:stop + 1] * spec_fluxes[...,\n start:stop + 1]\n res_fluxes[..., j] = np.sum(f_widths, axis=-1)\n res_fluxes[..., j] /= np.sum(spec_widths[start:stop + 1])\n\n if spec_errs is not None:\n e_wid = spec_widths[start:stop + 1] * spec_errs[...,\n start:stop + 1]\n\n res_fluxerrs[..., j] = np.sqrt(np.sum(e_wid ** 2, axis=-1))\n res_fluxerrs[..., j] /= np.sum(spec_widths[start:stop + 1])\n\n # Put back the old bin widths to their initial values for\n # later use\n spec_widths[start] /= start_factor\n spec_widths[stop] /= end_factor\n\n if inplace:\n\n self.dispersion = new_dispersion\n self.flux = res_fluxes\n if spec_errs is not None:\n self.flux_err = res_fluxerrs\n\n self.reset_mask()\n\n else:\n\n spec = self.copy()\n\n spec.dispersion = new_dispersion\n spec.flux = res_fluxes\n if spec_errs is not None:\n spec.flux_err = res_fluxerrs\n\n spec.reset_mask()\n\n return spec", "def get_line_wavelengths():\n line_wavelengths = OrderedDict() ; line_ratios = OrderedDict()\n \n line_wavelengths['PaB'] = [12821]\n line_ratios['PaB'] = [1.]\n line_wavelengths['Ha'] = [6564.61]\n line_ratios['Ha'] = [1.]\n line_wavelengths['Hb'] = [4862.68]\n line_ratios['Hb'] = [1.]\n line_wavelengths['Hg'] = [4341.68]\n line_ratios['Hg'] = [1.]\n line_wavelengths['Hd'] = [4102.892]\n line_ratios['Hd'] = [1.]\n \n line_wavelengths['OIII-4363'] = [4364.436]\n line_ratios['OIII-4363'] = [1.]\n line_wavelengths['OIII'] = [5008.240, 4960.295]\n line_ratios['OIII'] = [2.98, 1]\n \n # Split doublet, if needed\n line_wavelengths['OIII4959'] = [4960.295]\n line_ratios['OIII4959'] = [1]\n line_wavelengths['OIII5007'] = [5008.240]\n line_ratios['OIII5007'] = [1]\n \n line_wavelengths['OII'] = [3727.092, 3729.875]\n line_ratios['OII'] = [1, 1.] \n \n line_wavelengths['OI-6302'] = [6302.046, 6363.67]\n line_ratios['OI-6302'] = [1, 0.33]\n\n line_wavelengths['NeIII'] = [3869]\n line_ratios['NeIII'] = [1.]\n line_wavelengths['NeV'] = [3346.8]\n line_ratios['NeV'] = [1.]\n line_wavelengths['NeVI'] = [3426.85]\n line_ratios['NeVI'] = [1.]\n \n line_wavelengths['SIII'] = [9068.6, 9530.6][::-1]\n line_ratios['SIII'] = [1, 2.44][::-1]\n \n # Split doublet, if needed\n line_wavelengths['SIII9068'] = [9068.6]\n line_ratios['SIII9068'] = [1]\n line_wavelengths['SIII9531'] = [9530.6]\n line_ratios['SIII9531'] = [1]\n \n line_wavelengths['SII'] = [6718.29, 6732.67]\n line_ratios['SII'] = [1., 1.] 
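# equal default ratio for the [SII] 6716/6731 doublet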
\n \n line_wavelengths['HeII'] = [4687.5]\n line_ratios['HeII'] = [1.]\n line_wavelengths['HeI-5877'] = [5877.2]\n line_ratios['HeI-5877'] = [1.]\n line_wavelengths['HeI-3889'] = [3889.5]\n line_ratios['HeI-3889'] = [1.]\n line_wavelengths['HeI-1083'] = [10830.]\n line_ratios['HeI-1083'] = [1.]\n \n line_wavelengths['MgII'] = [2799.117]\n line_ratios['MgII'] = [1.]\n \n line_wavelengths['CIV-1549'] = [1549.480]\n line_ratios['CIV-1549'] = [1.]\n line_wavelengths['CIII-1908'] = [1908.734]\n line_ratios['CIII-1908'] = [1.]\n line_wavelengths['OIII-1663'] = [1665.85]\n line_ratios['OIII-1663'] = [1.]\n line_wavelengths['HeII-1640'] = [1640.4]\n line_ratios['HeII-1640'] = [1.]\n \n line_wavelengths['NII'] = [6549.86, 6585.27]\n line_ratios['NII'] = [1., 3]\n line_wavelengths['NIII-1750'] = [1750.]\n line_ratios['NIII-1750'] = [1.]\n line_wavelengths['NIV-1487'] = [1487.]\n line_ratios['NIV-1487'] = [1.]\n line_wavelengths['NV-1240'] = [1240.81]\n line_ratios['NV-1240'] = [1.]\n\n line_wavelengths['Lya'] = [1215.4]\n line_ratios['Lya'] = [1.]\n \n line_wavelengths['Lya+CIV'] = [1215.4, 1549.49]\n line_ratios['Lya+CIV'] = [1., 0.1]\n \n line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]\n line_ratios['Ha+SII'] = [1., 1./10, 1./10]\n line_wavelengths['Ha+SII+SIII+He'] = [6564.61, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+SII+SIII+He'] = [1., 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n\n line_wavelengths['Ha+NII+SII+SIII+He'] = [6564.61, 6549.86, 6585.27, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+NII+SII+SIII+He'] = [1., 1./(4.*4), 3./(4*4), 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n \n line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]\n line_ratios['OIII+Hb'] = [2.98, 1, 3.98/6.]\n \n line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]\n line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]\n\n line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]\n line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]\n\n line_wavelengths['OIII+OII'] = [5008.240, 4960.295, 3729.875]\n line_ratios['OIII+OII'] = [2.98, 1, 3.98/4.]\n \n line_wavelengths['OII+Ne'] = [3729.875, 3869]\n line_ratios['OII+Ne'] = [1, 1./5]\n \n return line_wavelengths, line_ratios", "def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd", "def findwavelengthsolution(xarr, farr, sl, sf, ws, mdiff=20, wdiff=20, sigma=5,\n niter=5):\n # match up the features\n # xp, wp=findfeatures(xarr, farr, sl, sf, ws, mdiff=mdiff, wdiff=wdiff,\n # sigma=sigma, niter=niter)\n xp, wp = crosslinematch(xarr, farr, sl, sf, ws, mdiff=mdiff, wdiff=wdiff,\n sigma=sigma, niter=niter)\n\n # find the solution to the best fit\n mask = (wp > 0)\n if mask.sum() >= ws.order:\n nws = WavelengthSolution.WavelengthSolution(\n xp[mask], wp[mask], model=ws.model)\n nws.fit()\n else:\n nws = None\n # for i in range(len(xp)): print xp[i], wp[i], 
wp[i]-nws.value(xp[i])\n # print nws.sigma(xp,wp)\n return nws", "def reference_spec_wavelength(spec, calib_lst, weight_lst):\n combined_calib = combine_calib(calib_lst, weight_lst)\n\n k = combined_calib['k']\n offset = combined_calib['offset']\n xorder = combined_calib['xorder']\n yorder = combined_calib['yorder']\n npixel = combined_calib['npixel']\n coeff = combined_calib['coeff']\n\n # calculate the wavelength for each aperture\n for row in spec:\n aperture = row['aperture']\n npoints = len(row['wavelength'])\n order = aperture*k + offset\n wavelength = get_wavelength(coeff, npixel,\n np.arange(npoints), np.repeat(order, npoints))\n row['order'] = order\n row['wavelength'] = wavelength\n\n card_lst = []\n #prefix = 'HIERARCH GAMSE WLCALIB'\n #if fiber is not None:\n # prefix = prefix + ' FIBER {}'.format(fiber)\n card_lst.append(('K', k))\n card_lst.append(('OFFSET', offset))\n card_lst.append(('XORDER', xorder))\n card_lst.append(('YORDER', yorder))\n card_lst.append(('NPIXEL', npixel))\n\n # write the coefficients to fits header\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n key = 'COEFF {:d} {:d}'.format(j, i)\n value = coeff[j,i]\n card_lst.append((key, value))\n\n # write information for every reference\n for icalib, (calib, weight) in enumerate(zip(calib_lst, weight_lst)):\n prefix = 'REFERENCE {:d}'.format(icalib+1)\n card_lst.append((prefix+' FILEID', calib['fileid']))\n card_lst.append((prefix+' DATE-OBS', calib['date-obs']))\n card_lst.append((prefix+' EXPTIME', calib['exptime']))\n card_lst.append((prefix+' WEIGHT', weight))\n card_lst.append((prefix+' NTOT', calib['ntot']))\n card_lst.append((prefix+' NUSE', calib['nuse']))\n card_lst.append((prefix+' STDDEV', calib['std']))\n\n return spec, card_lst", "def make_artificial_spectra(line_table, wavelength_range = None, dw = 0.1,\n sigma=1e-2, wavelength_unit=u.angstrom, flux_unit=u.electron):\n if wavelength_range is None:\n wavelength_range=[line_table['wavelength'].min(), line_table['wavelength'].max()]\n\n warr = np.arange(wavelength_range[0], wavelength_range[1], dw)\n\n flux = np.zeros(len(warr), dtype=float)\n for w, f in line_table:\n i = abs(warr - w).argmin()\n flux[i] = f\n\n if wavelength_unit is None:\n wavelength\n\n spec = Spectrum1D(spectral_axis=warr*wavelength_unit, flux = flux * flux_unit)\n spec = gaussian_smooth(spec, stddev=sigma)\n\n return spec", "def Hosek18b(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n 
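# opd is only needed to form its FFT (zf), so it is freed right away\n            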
del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = (self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def find_nearest_wav(self, wavelength):\n\n idx = np.searchsorted(self.wavelengths, wavelength, side=\"left\")\n if idx > 0 and (idx == len(self.wavelengths) or math.fabs(wavelength - self.wavelengths[idx-1]) < math.fabs(wavelength - self.wavelengths[idx])):\n return self.wavelengths[idx-1]\n else:\n return self.wavelengths[idx]", "def resample(s, p, q, h=None):\n gcd = math.gcd(p, q)\n if gcd > 1:\n p = p // gcd\n q = q // gcd\n \n if h is None: #design filter\n #properties of the antialiasing filter\n log10_rejection = -3.0\n stopband_cutoff_f = 1.0/(2.0 * max(p,q))\n roll_off_width = stopband_cutoff_f / 10.0\n \n #determine filter length\n #use empirical formula from [2] Chap 7, Eq. (7.63) p 476\n rejection_db = -20.0*log10_rejection\n l = ceil((rejection_db-8.0) / (28.714 * roll_off_width))\n \n #ideal sinc filter\n t = arange(-l, l + 1)\n ideal_filter=2*p*stopband_cutoff_f*sinc(2*stopband_cutoff_f*t) \n \n #determine parameter of Kaiser window\n #use empirical formula from [2] Chap 7, Eq. 
(7.62) p 474\n beta = kaiser_beta(rejection_db)\n \n #apodize ideal filter response\n h = kaiser(2*l+1, beta)*ideal_filter\n\n ls = len(s)\n lh = len(h)\n\n l = (lh - 1)/2.0\n ly = ceil(ls*p/float(q))\n\n #pre and postpad filter response\n nz_pre = floor(q - mod(l,q))\n nz_pre = int(nz_pre)\n hpad = h[-lh+nz_pre:]\n\n offset = floor((l+nz_pre)/q)\n nz_post = 0;\n while ceil(((ls-1)*p + nz_pre + lh + nz_post )/q ) - offset < ly:\n nz_post += 1\n hpad = hpad[:lh + nz_pre + nz_post]\n\n #filtering\n xfilt = upfirdn(s, hpad, p, q)\n\n return xfilt[int(offset)-1:int(offset)-1+int(ly)]", "def compare_winds(mr1='waroona_run2', mr2='waroona_run2uc', \n hour=datetime(2016,1,5,15),\n extent=None,\n subsubdir=None):\n extentname = mr1.split('_')[0]\n if extent is None:\n extent = constants.extents[extentname]\n \n\n cubes1 = fio.read_model_run(mr1, hour, extent=extent, add_winds=True)\n cubes2 = fio.read_model_run(mr2, hour, extent=extent, add_winds=True)\n \n # pull out horizontal winds\n u,v,s,wd = cubes1.extract(['u','v','s','wind_direction'])\n cu,cv,cs,cwd = cubes2.extract(['u','v','s','wind_direction'])\n dates = utils.dates_from_iris(u)\n height = utils.height_from_iris(s)#.coord('level_height').points\n lats = s.coord('latitude').points\n lons = s.coord('longitude').points\n clats = cs.coords('latitude').points\n clons = cs.coords('longitude').points\n\n ff1,ff2=None,None\n if fio.run_info[mr1]['hasfire']:\n ff1, = fio.read_fire(mr1,dtimes=dates,extent=extent,firefront=True)\n if fio.run_info[mr2]['hasfire']:\n ff2, = fio.read_fire(mr2,dtimes=dates,extent=extent,firefront=True)\n \n # density plot arguments\n bandwidth=1\n # x axis for wind speed\n xs = np.linspace(0,20,100)\n # x axis for wind dir\n xwd = np.linspace(0,360,100)\n # x axis for vert wind speed\n xw = np.linspace(-32,32,100)\n \n ## Colourmap setup for contourf plots\n # Horizontal wind colorbar should be the same for both runs\n hmaxthresh=[5,10,15,20,25,30,35]\n hcmap=plotting._cmaps_['windspeed']\n # vertical wind colourbar is constant\n wcmap=plotting._cmaps_['verticalvelocity']\n wnorm=colors.SymLogNorm(0.25,base=2.) 
# linear to +- 0.25, then log scale\n wcontours=np.union1d(np.union1d(2.0**np.arange(-2,6),-1*(2.0**np.arange(-2,6))),np.array([0]))\n \n # make 4 vertical bins\n row1 = (0<=height) * (height<500)\n row2 = (500<=height) * (height<2000)\n row3 = (2000<=height) * (height<5000)\n row4 = (5000<=height) * (height<9000)\n \n # loop over datetimes:\n for di, date in enumerate(dates):\n \n s1, s2, s3, s4 = [np.mean(s[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n u1, u2, u3, u4 = [np.mean(u[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n v1, v2, v3, v4 = [np.mean(v[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n wd1, wd2, wd3, wd4 = [np.mean(wd[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n \n cs1, cs2, cs3, cs4 = [np.mean(cs[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n cu1, cu2, cu3, cu4 = [np.mean(cu[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n cv1, cv2, cv3, cv4 = [np.mean(cv[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n cwd1, cwd2, cwd3, cwd4 = [np.mean(cwd[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n \n \n # Plotting\n plt.close()\n fig, axes = plt.subplots(4,4,figsize=[12,12])\n for i, (si, ui, vi, wdi, csi, cui, cvi, cwdi) in \\\n enumerate(zip([s1,s2,s3,s4],[u1,u2,u3,u4],[v1,v2,v3,v4],[wd1,wd2,wd3,wd4],\n [cs1,cs2,cs3,cs4],[cu1,cu2,cu3,cu4],[cv1,cv2,cv3,cv4],[cwd1,cwd2,cwd3,cwd4])):\n ## Show contourf of wind speed\n plt.sca(axes[0,i])\n \n # first determine colourmap contours so that they are the same between plots (and useful)\n hmax_index=np.sum(np.max(np.max(si),np.max(csi))>hmaxthresh)\n hcontours=np.linspace(0,hmaxthresh[hmax_index],20)\n \n # plot the filled contour for h-wind speeds\n plotting.map_contourf(extent, si, lats, lons, cmap=hcmap,\n clabel=\"\", cbar=False,\n cbarform=None,\n levels=hcontours)\n # overlaid with quiver of wind dir\n #plotting.map_quiver(ui,vi,lats,lons,nquivers=7)\n plt.streamplot(lons,lats,ui,vi,color='grey',minlength=0.5)\n # set limits back to latlon limits\n plt.ylim(lats[0],lats[-1])\n plt.xlim(lons[0],lons[-1])\n \n # Add locations and fire\n plotting.map_add_locations_extent(extentname, \n hide_text=True,\n color='k')\n if ff1 is not None:\n plotting.map_fire(ff1[di].data,lats,lons)\n # Add ylabel on left most plot\n if i==0: plt.ylabel(mr1)\n # add title along top row\n plt.title(['<500m','500m-2000m','2km-5km','5km-9km'][i])\n \n ## for comparison model also\n plt.sca(axes[1,i])\n img,_ = plotting.map_contourf(extent, csi, clats, clons, cmap=hcmap, \n clabel=\"\", \n cbar=False, cbarform=None,\n levels=hcontours,)\n \n # overlaid with quiver of wind dir\n #plotting.map_quiver(cui,cvi,lats,lons,nquivers=7)\n \n plt.streamplot(clons,clats,cui,cvi,color='grey',minlength=0.5)\n # set limits back to latlon limits\n plt.ylim(lats[0],lats[-1])\n plt.xlim(lons[0],lons[-1])\n \n plotting.map_add_locations_extent(extentname, \n hide_text=True,\n color='k')\n # add fire\n if ff2 is not None:\n plotting.map_fire(ff2[di].data,clats,clons)\n # add label on leftmost\n if i==0: plt.ylabel(mr2)\n \n ## Add colourbar for column\n cbar_ax = fig.add_axes([0.14+0.2025*i, 0.508, 0.14, 0.01])# X Y Width Height\n cbar=fig.colorbar(img, cax=cbar_ax, format=ticker.ScalarFormatter(), pad=0,\n orientation='horizontal')\n cbar.set_ticks(np.arange(0,hmaxthresh[hmax_index]+1,5))\n cbar.set_ticklabels(np.arange(0,hmaxthresh[hmax_index]+1,5))\n \n ## add density plot for wind speed\n plt.sca(axes[2,i])\n # density function:\n sdens = 
gaussian_kde(si.flatten(),bw_method=bandwidth)\n plt.plot(xs,sdens(xs), label=mr1,linewidth=2)\n csdens = gaussian_kde(csi.flatten(),bw_method=bandwidth)\n plt.plot(xs,csdens(xs), label=mr2)\n plt.yticks([],[])\n if i==0: plt.ylabel('wind speed density')\n if i==3: plt.legend()\n \n \n ## add density plot for wind dir\n plt.sca(axes[3,i])\n # density function:\n wddens = gaussian_kde(wdi.flatten(),bw_method=bandwidth)\n plt.plot(xwd,wddens(xwd), label=mr1, linewidth=2)\n cwddens = gaussian_kde(cwdi.flatten(),bw_method=bandwidth)\n plt.plot(xwd,cwddens(xwd), label=mr2, linewidth=2, linestyle='--')\n plt.yticks([],[])\n if i==0: plt.ylabel('wind dir density')\n if i==3: plt.legend()\n \n plt.suptitle(date.strftime(\"%Y%m%d %H:%M(UTC)\"))\n subdir='horizontal'\n if subsubdir is not None:\n subdir += '/'+subsubdir\n fio.save_fig(mr2, _sn_, date, plt, subdir=subdir)\n \n ## Also want to look at vertical winds\n w, = cubes1.extract(['upward_air_velocity'])\n cw, = cubes2.extract(['upward_air_velocity'])\n \n w1, w2, w3, w4 = [np.mean(w[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n cw1, cw2, cw3, cw4 = [np.mean(cw[di,row,:,:].data, axis=0) for row in [row1,row2,row3,row4]]\n \n # Plotting\n plt.close()\n fig, axes = plt.subplots(3,4,figsize=[12,12])\n for i, (wi, cwi) in enumerate(zip([w1,w2,w3,w4],[cw1,cw2,cw3,cw4])):\n ## Show contourf of wind speed\n plt.sca(axes[0,i])\n plotting.map_contourf(extent, wi, lats,lons,cmap=wcmap,clabel=\"\",levels=wcontours,norm=wnorm,cbar=False,cbarform=None)\n plotting.map_add_locations_extent(extentname, \n hide_text=True,\n color='k')\n if ff1 is not None:\n plotting.map_fire(ff1[di].data,lats,lons)\n if i==0: plt.ylabel(mr1)\n plt.title(['<500m','500m-2000m','2km-5km','5km-9km'][i])\n \n ## for comparison model also\n plt.sca(axes[1,i])\n img,_=plotting.map_contourf(extent, cwi, clats,clons,cmap=wcmap,clabel=\"\",levels=wcontours,norm=wnorm,cbar=False,cbarform=None)\n plotting.map_add_locations_extent(extentname, \n hide_text=True,\n color='k')\n if ff2 is not None:\n plotting.map_fire(ff2[di].data,clats,clons)\n if i==0: plt.ylabel(mr2)\n \n ## add density plot for wind speed\n plt.sca(axes[2,i])\n ## density function:\n #wdens = gaussian_kde(wi.flatten(),bw_method=bandwidth)\n #plt.plot(xw,wdens(xw), label=mr1, linewidth=2)\n #cwdens = gaussian_kde(cwi.flatten(),bw_method=bandwidth)\n #plt.plot(xw,cwdens(xw), label=mr2, linewidth=2, linestyle='--')\n ## cumulative frequency\n wcdf = cumfreq(wi.flatten(), numbins=len(xw), defaultreallimits=(min(xw),max(xw)))\n plt.plot(xw,wcdf.cumcount,label=mr1, linewidth=2)\n cwcdf = cumfreq(cwi.flatten(), numbins=len(xw), defaultreallimits=(min(xw),max(xw)))\n plt.plot(xw,cwcdf.cumcount,label=mr2, linewidth=2, linestyle='--')\n plt.yticks([],[])\n if i==0: plt.ylabel('wind speed cdf')\n if i==3: plt.legend()\n ## Add colourbar\n cbar_ax = fig.add_axes([.35, 0.37, 0.3, 0.015])# X Y Width Height\n cbar=fig.colorbar(img, cax=cbar_ax, format=ticker.ScalarFormatter(), pad=0,\n orientation='horizontal')\n cbar.set_ticks([-32,-8,-2,0,2,8,32])\n cbar.set_ticklabels([-32,-8,-2,0,2,8,32])\n # title and save\n plt.suptitle(date.strftime(\"%Y%m%d %H:%M(UTC)\"))\n \n subdir='vertical'\n if subsubdir is not None:\n subdir += '/'+subsubdir\n fio.save_fig(mr2, _sn_, date, plt, subdir=subdir)", "def perform_spectral_interpolation_only_gaussian(gaussian_data):\n dframe = pd.DataFrame()\n\n wavelength1 = gaussian_data[:, -1]\n\n sampled_wavelength1 = np.arange(min(wavelength1), max(wavelength1), 2)\n a1_val = 
gaussian_data[:, 0]\n sigma1 = gaussian_data[:, 1]\n\n # A1 first\n fit_params_a1 = interp1d(wavelength1, a1_val, kind='linear')\n fitted_val_a1 = fit_params_a1(sampled_wavelength1)\n # Now A2\n\n\n # Now Sigma1\n fit_params_sigma1 = interp1d(wavelength1, sigma1, kind='linear')\n fitted_val_sigma1 = fit_params_sigma1(sampled_wavelength1)\n\n # Now Sigma2\n\n\n# plt.plot(wavelength1, Sigma1, 'bo')\n# plt.plot(sampled_wavelength1, fitted_val_Sigma1, 'ro--', markersize=3)\n# plt.grid(True, linestyle=':')\n# plt.show()\n dframe = pd.DataFrame({'W1' : sampled_wavelength1,\n 'A1' : fitted_val_a1,\n 'Sigma1' : fitted_val_sigma1,\n })\n\n return dframe.round(3)", "def combine_sky_spectra(name):\n sky_list = get_sky_spectra(name)\n sizes = get(name, 'sizes')\n scaled = []\n for spectra in sky_list:\n scale = sizes[spectra] # scale by the number of pixels arcoss\n num = zerocount(spectra)\n sarith('%s/disp/%s.1d' % (name, num), '/', scale,\n '%s/sky/%s.scaled' % (name, num))\n scaled.append('%s/sky/%s.scaled' % (name, num))\n if os.path.isfile('%s/sky.1d' % name):\n os.remove('%s/sky.1d' % name)\n scombine(list_convert(scaled), '%s/sky.1d' % name)", "def wave(crval1, crpix1, cdelt1, i, loglin=True, angstroms=True):\n crval1, crpix1, cdelt1 = tuple(map(np.float64, (crval1, crpix1, cdelt1)))\n if type(i) is np.ndarray:\n i = i.astype(np.float64)\n elif type(i) is int:\n i = float(i)\n else:\n raise Exception(\"unsupported index type: %s\" % (str(type(i))))\n\n # if angstroms:\n # crval1 *= 1.E3\n\n wv = crval1 + cdelt1 * (i + 1 - crpix1)\n return 10. ** wv if loglin else wv", "def add_wavelength(filename, model, std_tol, overwrite=False, plot_path=None):\n hdulist = fits.open(filename)\n\n # read both hdu's\n logger.debug(\"\\tObject: {}\".format(hdulist[0].header['OBJECT']))\n\n # extract just the middle part of the CCD (we only really care about Halpha)\n tbl = Table(hdulist[1].data)\n\n if 'wavelength' in tbl.colnames and not overwrite:\n logger.debug(\"\\tTable already contains wavelength values!\")\n return\n\n # compute wavelength array for the pixels\n wavelength, var = model.gp.predict(model.y, tbl['pix']-model.x_shift,\n return_var=True)\n bad_idx = np.sqrt(var) > std_tol.to(u.angstrom).value\n wavelength[bad_idx] = np.nan\n\n tbl['wavelength'] = wavelength\n tbl['wavelength_err'] = np.sqrt(var)\n\n new_hdu1 = fits.table_to_hdu(tbl)\n new_hdulist = fits.HDUList([hdulist[0], new_hdu1])\n\n logger.debug(\"\\tWriting out file with wavelength array.\")\n new_hdulist.writeto(filename, overwrite=True)\n\n if plot_path is not None:\n # plot the spectrum vs. 
wavelength\n fig,axes = plt.subplots(2, 1, figsize=(12,8), sharex=True)\n\n axes[0].plot(tbl['wavelength'], tbl['source_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[0].errorbar(tbl['wavelength'], tbl['source_flux'], 1/np.sqrt(tbl['source_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[0].set_ylim(tbl['source_flux'][200]/4, np.nanmax(tbl['source_flux']))\n axes[0].set_yscale('log')\n\n axes[1].plot(tbl['wavelength'], tbl['background_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[1].errorbar(tbl['wavelength'], tbl['background_flux'], 1/np.sqrt(tbl['background_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[1].set_ylim(1e-1, np.nanmax(tbl['background_flux']))\n axes[1].set_yscale('log')\n\n fig.tight_layout()\n _filename_base = path.splitext(path.basename(filename))[0]\n fig.savefig(path.join(plot_path, '{0}_1d_wvln.png'\n .format(_filename_base)))\n\n plt.close(fig)", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def direct_sound(rir):\n all_peak_indices = local_max(rir,threshold=1e-5)\n i = np.argsort(rir[all_peak_indices])\n direct_sound_index = all_peak_indices[i][-1]\n peak_indices = all_peak_indices[np.where(all_peak_indices < \n direct_sound_index)[0]]\n values = rir[peak_indices]\n # direct sound side lobe range\n rng = direct_sound_index - max(peak_indices[np.where(values < \n (rir[direct_sound_index] * 0.02))[0]])\n return direct_sound_index, rng, all_peak_indices", "def test_equivalent_width():\n\n spec = IGRINSSpectrum(file=file)\n mu = np.median(spec.wavelength.value)\n equivalent_width = spec.measure_ew(mu)\n\n assert equivalent_width is not None\n assert type(equivalent_width) is not int\n assert type(equivalent_width) is astropy.units.quantity.Quantity\n new_unit = equivalent_width.to(spec.wavelength.unit)\n assert new_unit.unit == spec.wavelength.unit", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def Create_Constant_WavelengthArray(spec_cube,final_wave_start,final_wave_end):\n\tdwave = np.zeros(len(spec_cube))\n\tfor n in xrange(len(spec_cube)):\n\t\ttemp_final_wave = spec_cube[n][0] # 
Take one of the spectrum use its resolution\n\t\tdwave[n] = np.median(temp_final_wave[1:] - temp_final_wave[:-1])\n\tdwave = np.max(dwave)\n\tfinal_wave = np.arange(final_wave_start,final_wave_end,dwave)\n\tprint 'Since input dv = 0 -> median resolution (constant) dwave = %f angstrom is used.' % dwave\n\treturn final_wave", "def single_slit_diffraction_intensity(slit_width, wavelength, screen_distance, X):\n return ((np.sin((np.pi * slit_width * X) / (wavelength * screen_distance))) / (\n (np.pi * slit_width * X) / (wavelength * screen_distance))) ** 2", "def sky_noise_weighting(file_name, sky_file_name):\n cs_data = spectra_analysis(file_name, sky_file_name)\n cube_data = cs_data['gd_shifted']\n sn_data = cs_data['sky_noise']\n wl_soln = wavelength_solution(file_name)\n\n sn_data_min = np.min(sn_data)\n in_wt = 1 / (sn_data - sn_data_min + 1)\n\n sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise\n for i in range(len(in_wt)): \n data_acl = cube_data[i]\n data_sky = sn_data[i]\n data_prb = in_wt[i]\n \n if ( 0.00 <= np.abs(data_prb) <= 1.00 ):\n sky_regns[i][0] = data_prb\n sky_regns[i][1] = data_sky\n\n # finding max peak in the sky-noise data and fitting a Gaussian to that\n # x-axis data\n x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n\n # Finding peaks with PeakUtils\n sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)\n sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)\n\n if (sky_peaks_x.size != 0):\n sky_peak = sky_peaks_x[0]\n sky_peak_index = find_nearest(sky_peak, x_range)\n else:\n sky_peak = 6000\n sky_peak_index = 0\n\n sky_peak_loc = x_range[sky_peak_index]\n\n sky_peak_range = [sky_peak-100, sky_peak+100]\n sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]\n\n sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n\n sky_gauss_params = Parameters()\n sky_gauss_params.add('c', value=0)\n sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)\n sky_gauss_params.add('mu', value=sky_peak_loc)\n sky_gauss_params.add('sigma1', value=3)\n\n sky_gauss_model = Model(sn_gauss)\n sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x, \n params=sky_gauss_params)\n sky_gauss_best = sky_gauss_rslt.best_values\n\n sky_sigma = sky_gauss_best['sigma1']\n\n return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}", "def autoscales(N, dt, dj, wf, w0):\n \n if wf == 'morlet':\n s0 = (dt * (w0 + np.sqrt(2 + w0**2))) / (PI2)\n else:\n raise ValueError('wavelet function not available')\n\n J = np.floor(dj**-1 * np.log2((N * dt) / s0))\n s = np.empty(int(J + 1))\n\n for i in range(s.shape[0]):\n s[i] = s0 * 2**(i * dj)\n\n return s", "def __init__(self, downsampling=False, wl_range = False, spectra_path = '/Users/kyleturner/Dropbox/My_Box/Code/M_Files/process_svc_v2p1/rrs_model_3C-master/spectra/'):\n\n\t\t\ta_ph = pd.read_csv(spectra_path + 'phyto.A', skiprows=11, sep='\\t', index_col=0).iloc[:,0]\n\t\t\n\t\t\ta_w = pd.read_csv(spectra_path + 'water.A', skiprows=10, sep='\\t', index_col=0).iloc[:,0]\n\t\t\tdaw_dT = pd.read_csv(spectra_path + 'daWdT.txt', skiprows=10, sep='\\t', index_col=0).iloc[:,0]\n\t\t\tastar_y = pd.read_csv(spectra_path + 'Y.A', skiprows=11, delimiter=' ', index_col=0).loc[350:].iloc[:,0]\n\n\t\t\td = pd.DataFrame(index = a_ph.index)\n\t\t\td.index.name = 'Wavelength, [nm]'\n\t\t\td['astar_ph'] = a_ph\n\t\t\td['astar_y'] = astar_y \n\t\t\td['a_w'] = a_w 
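# pure-water absorption spectrum read from 'water.A' above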
\n\t\t\td['daw_dT'] = daw_dT\n\n\t\t\tif downsampling == False:\n\t\t\t\tself.spectra = d\n\t\t\telse: \n\t\t\t\tself.spectra = d.apply(lambda x: pd.rolling_mean(x, downsampling, center=True))\n\n\t\t\tif wl_range != False:\n\t\t\t\tself.spectra = self.spectra.loc[wl_range[0]:wl_range[1]].dropna()\n\n\t\t\tself.wl = np.array(self.spectra.index)\n\t\t\t\t\n\t\t\tself.model = self._compile_model()", "def _waist_from_q(q, wavelength):\n\n return np.sqrt(wavelength / (np.pi * np.imag(-1 / q)))", "def wind_adjust_func(uz_array, zw):\n return uz_array * 4.87 / np.log(67.8 * zw - 5.42)", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def set_wl_bounds(self):\n wls = self.xds['wl'].values\n min_wl, max_wl = min(wls), max(wls)\n center = (max_wl+min_wl)/2\n min_wl, max_wl, center = round(min_wl), round(max_wl), round(center)\n self.param.band.bounds = (min_wl, max_wl)\n self.band = center - self.bw/2, center + self.bw/2\n self.center = center", "def count_convert_wavelength_range(self):\n mini = ct.c_float()\n maxi = ct.c_float()\n self.lib.GetCountConvertWavelengthRange(ct.pointer(mini),\n ct.pointer(maxi))\n return (mini.value, maxi.value)", "def ex2d(image, ivar, psf, specrange, wavelengths, xyrange=None,\n full_output=False, regularize=0.0):\n\n #- Range of image to consider\n waverange = (wavelengths[0], wavelengths[-1])\n \n if xyrange is None:\n xmin, xmax, ymin, ymax = xyrange = psf.xyrange(specrange, waverange)\n image = image[ymin:ymax, xmin:xmax]\n ivar = ivar[ymin:ymax, xmin:xmax]\n else:\n xmin, xmax, ymin, ymax = xyrange\n\n nx, ny = xmax-xmin, ymax-ymin\n npix = nx*ny\n \n nspec = specrange[1] - specrange[0]\n nwave = len(wavelengths)\n \n #- Solve AT W pix = (AT W A) flux\n \n #- Projection matrix and inverse covariance\n A = psf.projection_matrix(specrange, wavelengths, xyrange)\n\n #- Pixel weights matrix\n w = ivar.ravel()\n W = spdiags(ivar.ravel(), 0, npix, npix)\n\n #-----\n #- Extend A with an optional regularization term to limit ringing.\n #- If any flux bins don't contribute to these pixels,\n #- also use this term to constrain those flux bins to 0.\n \n #- Original: exclude flux bins with 0 pixels contributing\n # ibad = (A.sum(axis=0).A == 0)[0]\n \n #- Identify fluxes with very low weights of pixels contributing \n fluxweight = W.dot(A).sum(axis=0).A[0]\n minweight = 0.01*np.max(fluxweight)\n ibad = fluxweight < minweight\n \n #- Add regularization of low weight fluxes\n I = regularize*scipy.sparse.identity(nspec*nwave)\n I.data[0,ibad] = minweight - fluxweight[ibad]\n \n #- Only need to extend A if regularization is non-zero\n if np.any(I.data):\n pix = np.concatenate( (image.ravel(), np.zeros(nspec*nwave)) )\n Ax = scipy.sparse.vstack( (A, I) )\n wx = np.concatenate( (w, np.ones(nspec*nwave)) )\n else:\n pix = image.ravel()\n Ax = A\n wx = w\n\n #- Inverse covariance\n Wx = spdiags(wx, 0, len(wx), len(wx))\n iCov = Ax.T.dot(Wx.dot(Ax))\n \n #- Solve (image = A flux) weighted by Wx:\n #- A^T W image = (A^T W A) flux = iCov flux \n y = Ax.T.dot(Wx.dot(pix))\n \n xflux = spsolve(iCov, 
y).reshape((nspec, nwave))\n\n #- Solve for Resolution matrix\n try:\n R, fluxivar = resolution_from_icov(iCov)\n except np.linalg.linalg.LinAlgError, err:\n outfile = 'LinAlgError_{}-{}_{}-{}.fits'.format(specrange[0], specrange[1], waverange[0], waverange[1])\n print \"ERROR: Linear Algebra didn't converge\"\n print \"Dumping {} for debugging\".format(outfile)\n import fitsio\n fitsio.write(outfile, image, clobber=True)\n fitsio.write(outfile, ivar, extname='IVAR')\n fitsio.write(outfile, A.data, extname='ADATA') \n fitsio.write(outfile, A.indices, extname='AINDICES')\n fitsio.write(outfile, A.indptr, extname='AINDPTR')\n fitsio.write(outfile, iCov.toarray(), extname='ICOV')\n raise err\n \n #- Convolve with Resolution matrix to decorrelate errors\n fluxivar = fluxivar.reshape((nspec, nwave))\n rflux = R.dot(xflux.ravel()).reshape(xflux.shape)\n\n if full_output:\n results = dict(flux=rflux, ivar=fluxivar, R=R, xflux=xflux, A=A)\n results['iCov'] = iCov\n return results\n else:\n return rflux, fluxivar, R", "def guess_wavelength(x, aperture, identlist, linelist, param):\n rough_wl = None\n\n # guess wavelength from the identified lines in this order\n if aperture in identlist:\n list1 = identlist[aperture]\n if list1.size >= 2:\n fit_order = min(list1.size-1, 2)\n local_coeff = np.polyfit(list1['pixel'], list1['wavelength'],\n deg=fit_order)\n rough_wl = np.polyval(local_coeff, x)\n\n # guess wavelength from global wavelength solution\n if rough_wl is None and param['coeff'].size > 0:\n npixel = param['npixel']\n order = aperture*param['k'] + param['offset']\n rough_wl = get_wavelength(param['coeff'], param['npixel'], x, order)\n\n if rough_wl is None:\n return None\n else:\n # now find the nearest wavelength in linelist\n wave_list = np.array([line[0] for line in linelist])\n iguess = np.abs(wave_list-rough_wl).argmin()\n guess_wl = wave_list[iguess]\n return guess_wl", "def generate_wavelength_model(comp_lamp_path, night_path, plot_path):\n\n # read 1D comp lamp spectrum\n spec = Table.read(comp_lamp_path)\n\n # read wavelength guess file\n guess_path = path.abspath(path.join(night_path,\n '..', 'wavelength_guess.csv'))\n pix_wav = np.genfromtxt(guess_path, delimiter=',', names=True)\n\n # get emission line centroids at the guessed positions of the lines\n pix_x0s = fit_all_lines(spec['pix'], spec['flux'], spec['ivar'],\n pix_wav['wavelength'], pix_wav['pixel'])\n\n # only keep successful ones:\n mask = np.isfinite(pix_x0s)\n logger.debug(\"Successfully fit {}/{} comp. 
lamp lines\"\n .format(mask.sum(), len(mask)))\n pix_wav = pix_wav[mask]\n pix_x0s = pix_x0s[mask]\n\n # --------------------------------------------------------------------------\n # fit a gaussian process to determine the pixel-to-wavelength transformation\n #\n idx = np.argsort(pix_x0s)\n med_x = np.median(pix_x0s[idx])\n x = pix_x0s[idx] - med_x\n y = pix_wav['wavelength'][idx]\n\n model = GPModel(x=x, y=y, n_bg_coef=n_bg_coef, x_shift=med_x)\n\n # Fit for the maximum likelihood parameters\n bounds = model.gp.get_parameter_bounds()\n init_params = model.gp.get_parameter_vector()\n soln = minimize(model, init_params, method=\"L-BFGS-B\",\n bounds=bounds)\n model.gp.set_parameter_vector(soln.x)\n logger.debug(\"Success: {}, Final log-likelihood: {}\".format(soln.success,\n -soln.fun))\n\n # ---\n # residuals to the mean model\n x_grid = np.linspace(0, 1600, 1024) - med_x\n mu, var = model.gp.predict(y, x_grid, return_var=True)\n std = np.sqrt(var)\n\n _y_mean = model.mean_model.get_value(x)\n _mu_mean = model.mean_model.get_value(x_grid)\n\n # Plot the maximum likelihood model\n fig,ax = plt.subplots(1, 1, figsize=(8,8))\n\n # data\n ax.scatter(x + med_x, y - _y_mean, marker='o')\n\n # full GP model\n gp_color = \"#ff7f0e\"\n ax.plot(x_grid+med_x, mu - _mu_mean, color=gp_color, marker='')\n ax.fill_between(x_grid+med_x, mu+std-_mu_mean, mu-std-_mu_mean,\n color=gp_color, alpha=0.3, edgecolor=\"none\")\n\n ax.set_xlabel('pixel')\n ax.set_ylabel(r'wavelength [$\\AA$]')\n ax.set_title(path.basename(comp_lamp_path))\n\n fig.tight_layout()\n fig.savefig(path.join(plot_path, 'wavelength_mean_subtracted.png'), dpi=200)\n # ---\n\n # ---\n # residuals to full GP model\n mu, var = model.gp.predict(y, x_grid, return_var=True)\n std = np.sqrt(var)\n\n y_mu, var = model.gp.predict(y, x, return_var=True)\n\n # Plot the maximum likelihood model\n fig,ax = plt.subplots(1, 1, figsize=(12,8))\n\n # data\n ax.scatter(x + med_x, y - y_mu, marker='o')\n\n gp_color = \"#ff7f0e\"\n ax.plot(x_grid+med_x, mu-mu, color=gp_color, marker='')\n ax.fill_between(x_grid+med_x, std, -std, color=gp_color,\n alpha=0.3, edgecolor=\"none\")\n\n ax.set_xlabel('pixel')\n ax.set_ylabel(r'wavelength residual [$\\AA$]')\n ax.set_title(path.basename(comp_lamp_path))\n\n ax.set_ylim(-1, 1)\n ax.axvline(683., zorder=-10, color='#666666', alpha=0.5)\n\n ax2 = ax.twinx()\n ax2.set_ylim([x/6563*300000 for x in ax.get_ylim()])\n ax2.set_ylabel(r'velocity error at ${{\\rm H}}_\\alpha$ [{}]'\n .format((u.km/u.s).to_string(format='latex_inline')))\n\n fig.tight_layout()\n fig.savefig(path.join(plot_path, 'wavelength_residuals.png'), dpi=200)\n # --------------------------------------------------------------------------\n\n return model" ]
[ "0.6577591", "0.6556523", "0.6496855", "0.64009374", "0.6335774", "0.62576574", "0.6154168", "0.60373366", "0.5991376", "0.5960921", "0.59510165", "0.5908147", "0.5871862", "0.58661956", "0.58438027", "0.5817838", "0.5814624", "0.5808811", "0.58038354", "0.5801125", "0.57996815", "0.5792571", "0.5785777", "0.57318074", "0.573162", "0.57239443", "0.57232106", "0.57221806", "0.57070905", "0.56993026", "0.56955343", "0.5657596", "0.5649021", "0.56256986", "0.5612314", "0.5607732", "0.56066597", "0.5604316", "0.5596122", "0.559371", "0.5590364", "0.55871993", "0.55812806", "0.5581058", "0.5575667", "0.5555707", "0.55556154", "0.5554827", "0.5536729", "0.55288106", "0.5524708", "0.55226696", "0.5519146", "0.55154383", "0.55002534", "0.54957837", "0.54943", "0.54907405", "0.5470424", "0.5461709", "0.5455904", "0.54501736", "0.5446339", "0.5433866", "0.543245", "0.54180497", "0.5414791", "0.5414318", "0.54133326", "0.5408512", "0.54079276", "0.5403602", "0.5401329", "0.53943765", "0.5390565", "0.5389766", "0.5385764", "0.5375432", "0.5356737", "0.5354246", "0.5349954", "0.53495514", "0.5345942", "0.5341276", "0.5339836", "0.53397024", "0.5339105", "0.5337253", "0.5330819", "0.5325974", "0.53186893", "0.5317575", "0.5310217", "0.5308611", "0.5305171", "0.5302775", "0.5302627", "0.5300377", "0.5299498", "0.5299448" ]
0.59196925
11
Takes two files in a list as input, e.g. data = [path1, path2]
def _input_as_string(self,data):
    inputFiles = ' '.join(data)
    self._input_filename = data
    return inputFiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(file1, file2):\n with open(file1, 'r') as f1, open(file2, 'r') as f2:\n return [line for line in f1], [line for line in f2]", "def load_files_to_compare(self):\n self.first_source_data = load_path(self.path1)\n self.second_source_data = load_path(self.path2)", "def load_files(file1, file2):\n\n f = open(\"results/\" + file1, \"r\")\n results = json.loads(f.read())\n f.close()\n\n f = open(\"results/\" + file2, \"r\")\n previous_results = json.loads(f.read())\n f.close()\n\n compare(results, previous_results)", "def joinInds(r1,r2,outfname):\n outf = open(outfname,'w')\n f1 = file(r1,'r')\n f2 = file(r2,'r')\n for row1 in f1:\n outf.write('%s\\n' % (row1.strip()))\n for row1 in f2:\n outf.write('%s\\n' % (row1.strip()))\n outf.close()", "def open_and_read_file(statuses_1, statuses_2):\n\n # contents1 = open(file_path1).read()\n # contents2 = open(file_path2).read()\n\n # contents = contents1 + contents2\n contents = [s.text for s in statuses_1] + [s.text for s in statuses_2]\n contents = ' '.join(contents)\n\n # contents = statuses_1 + statuses_2\n\n return contents", "def load_data(fpath1, fpath2, maxlen1, maxlen2):\r\n sents1, sents2 = [], []\r\n with open(fpath1, 'r') as f1, open(fpath2, 'r') as f2:\r\n for sent1, sent2 in zip(f1, f2):\r\n if len(sent1.split()) + 1 > maxlen1:\r\n continue # 1: </s>\r\n if len(sent2.split()) + 1 > maxlen2:\r\n continue # 1: </s>\r\n sents1.append(sent1.strip())\r\n sents2.append(sent2.strip())\r\n return sents1, sents2", "def combine_data(data_file_1,data_file_2,lookup,outfile):\n # Read in tabbed data\n print(\"Reading in data from %s\" % data_file_1)\n data1 = IndexedFile(data_file_1,first_line_is_header=True)\n print(\"Reading in data from %s\" % data_file_2)\n data2 = IndexedFile(data_file_2,first_line_is_header=True)\n\n # Open output file\n fp = io.open(outfile,'wt')\n\n # Call main function to do the actual work\n combine_data_main(data1,data2,lookup,fp)\n\n # Finished\n fp.close()\n print(\"Output written to '%s'\" % outfile)", "def open_and_read_file(file_path1, file_path2):\n\n # your code goes here\n file_object1 = open(file_path1)\n file_object2 = open(file_path2)\n contents = file_object1.read() + \" \" + file_object2.read()\n file_object1.close()\n file_object2.close()\n\n return contents", "def only_in_one_to_json(\n self,\n path_dir_1,\n list_in_1,\n path_dir_2,\n list_in_2):\n only_in_one_json = []\n for fl in list_in_1:\n file_path_1 = os.path.join(path_dir_1, fl)\n file_size_1 = os.path.getsize(file_path_1)\n type_ = self.get_type(file_path_1)\n if (type_ == self.dir):\n file_size_1 = None\n only_in_one_json.append(self.cmp_files_to_json(\n file_path_1,\n type_,\n True,\n False,\n file_size_1,\n None,\n False,\n None))\n\n for fl in list_in_2:\n file_path_2 = os.path.join(path_dir_2, fl)\n file_size_2 = os.path.getsize(file_path_2)\n type_ = self.get_type(file_path_2)\n if (type_ == self.dir):\n file_size_2 = None\n\n only_in_one_json.append(self.cmp_files_to_json(\n file_path_2,\n type_,\n False,\n True,\n None,\n file_size_2,\n False,\n None))\n\n return only_in_one_json", "def compare_heads(list1, list2, diff_list):\n for line in list1:\n if line not in list2:\n diff_list.append(FILE1_SIGN + line)\n for line in list2:\n if line not in list1:\n diff_list.append(FILE2_SIGN + line)", "def assertEqualPathsList(first: Iterable[str], second: Iterable[str]) -> None: # pragma: no cover\n if any(isPass(path) for path in first):\n return\n if any(isPass(path) for path in second):\n return\n for fpath in first:\n assert 
any(fnmatch.fnmatch(fpath, spath) for spath in second)\n for spath in second:\n assert any(fnmatch.fnmatch(fpath, spath) for fpath in first)", "def append_files(in_file1, character, in_file2, out_file):\n return_data = 0\n\n write_data = ''\n\n i = 0\n try:\n with open(in_file1, 'rt') as fi1:\n lines1 = fi1.readlines() # Read all the lines in fi1 as a tuple\n \n with open(in_file2, 'rt') as fi2:\n lines2 = fi2.readlines() # Read all the lines in fi2 as a tuple\n \n with open(out_file, 'at') as fo:\n fo.seek(0,2)\n while i < len(lines1):\n lines1[i] = lines1[i].rstrip('\\n')\n #lines1[i] = lines1[i].rstrip('\\r')\n fo.write(lines1[i] + character + lines2[i])\n i = i + 1\n print(write_data)\n except IOError:\n print(\"Error in reading/writing file.\")\n return_data = 2\n else:\n print('Operation completed successfully.')\n return_data = 1\n finally:\n fi2.close()\n fi1.close()\n fo.close()\n print(\"done\")\n return return_data", "def merge_files(file_one, file_two):\n\n merged_lines = []\n line1 = file_one.readline()\n line2 = file_two.readline()\n\n while line1 != '' and line2 != '':\n if line1 < line2:\n merged_lines.append(line1)\n line1 = file_one.readline()\n else:\n merged_lines.append(line2)\n line2 = file_two.readline()\n\n if file_one == '':\n merged_lines.append(line2)\n lastlines = file_two.readlines()\n else:\n merged_lines.append(line1)\n lastlines = file_one.readlines()\n \n merged_lines.extend(lastlines)\n\n return merged_lines", "def copy_separate_files(source,dest1,dest2):\r\n filelist = os.listdir(source)\r\n \r\n if not os.path.exists(dest1):\r\n os.mkdir(dest1)\r\n \r\n if not os.path.exists(dest2):\r\n os.mkdir(dest2)\r\n \r\n for filename in filelist:\r\n source_file = os.path.join(source,filename)\r\n \r\n if filename[-4:] == '.png':\r\n shutil.copy(source_file,dest1)\r\n else:\r\n shutil.copy(source_file,dest2)", "def joinRows(r1,r2,outfname):\n outf = open(outfname,'w')\n f1 = file(r1,'r')\n f2 = file(r2,'r')\n for row1 in f1:\n if row1.strip() > '':\n row2 = f2.next()\n outf.write('%s%s\\n' % (row1.strip(),row2.strip()))\n outf.close()", "def dojoin(ipath1,ipath2,opath):\n r1 = '%s.map' % ipath1\n r2 = '%s.map' % ipath2\n if not mapsMatch(r1,r2):\n print '### maps %s and %s do not match' % (r1,r2)\n sys.exit(1)\n outpath = '%s.map' % opath\n shutil.copyfile(r1,outpath)\n r1 = '%s.eigenstratgeno' % ipath1\n r2 = '%s.eigenstratgeno' % ipath2\n outpath = '%s.eigenstratgeno' % opath\n joinRows(r1,r2,outpath)\n outpath = '%s.ind' % opath\n r1 = '%s.ind' % ipath1\n r2 = '%s.ind' % ipath2\n joinInds(r1,r2,outpath)", "def merge(file_a, file_b):\n res = []\n a = read_bin_file_in_full(file_a)\n b = read_bin_file_in_full(file_b)\n pa = 0\n pb = 0\n while pa < len(a) and pb < len(b):\n if a[pa] < b[pb]:\n res.append(a[pa])\n pa += 1\n else:\n res.append(b[pb])\n pb += 1\n while pa < len(a):\n res.append(a[pa])\n pa += 1\n while pb < len(b):\n res.append(b[pb])\n pb += 1\n\n return write_to_temp(res)", "def get_data_files(source_dest_pairs):\n data_files = []\n for src_dir, dest_dir in source_dest_pairs:\n for src_root, _, files in os.walk(src_dir):\n dest_root = src_root.replace(src_dir, dest_dir, 1)\n dir_files = []\n for file_ in files:\n dir_files.append(os.path.join(src_root, file_))\n data_files.append((dest_root, dir_files))\n return data_files", "def compfile(input_path, name=\"\", list_files=None):\n __string(input_path, \"%s path\" % name, True)\n\n if list_files is None:\n list_files = []\n elif not list_files:\n __ex(\"File list is empty (no files to compare 
with).\", True)\n else:\n for item in list_files:\n if not isinstance(item, list):\n __ex(\"Every list item must be a sub-list.\", True)\n if not len(item) == 2:\n __ex(\"Every sub-list must contain two items.\", True)\n\n input_path = os.path.abspath(input_path)\n for item in list_files:\n path_compare = os.path.abspath(str(item[0]))\n name_compare = str(item[1])\n if input_path == path_compare:\n __ex(\"The %s and the %s file path must not be identical.\" %\n (name, name_compare), False)\n if os.path.exists(input_path) and os.path.exists(path_compare):\n if filecmp.cmp(input_path, path_compare, 0):\n __ex(\"The %s and %s file content must not be identical.\" %\n (name, name_compare), False)", "def get_data_from_files(path, filename):\n\n data_files = []\n\n if path:\n list_of_files = os.listdir(path)\n print(\"List of data files:\", list_of_files)\n\n for file in list_of_files:\n if filename in file:\n full_filepath = path + \"/\" + file\n data_files.append(full_filepath)\n #print(data_files)\n\n else:\n data_files = []\n #print(data_files)\n return data_files", "def inBoth(from_files):\n t_nof1 = []\n f_nof1 = []\n array_of_times = []\n for file in from_files:\n item = file.replace('_COMPLETE', '')\n if item in to_files:\n to = os.path.join('/ToNof1/archive', item)\n from_nof1 = os.path.join('/FromNof1', file)\n t_nof1.append(to)\n f_nof1.append(from_nof1)\n\n\n\n with open(\"TAT_From_Nof1.tsv\", 'w') as f:\n i = 0\n myHeader = \"Completed File\\tCompleted Time\\tSent File\\tSent Time\\tDelta\\n\"\n f.write(myHeader)\n while i < len(to_files):\n today = datetime.today()\n\n fName = os.path.basename(f_nof1[i])\n tName = os.path.basename(t_nof1[i])\n\n fTime = getDate(f_nof1[i])\n tTime = getDate(t_nof1[i])\n\n duration = (today - fTime)\n if duration.days < 90:\n delta = fTime - tTime\n seconds = (delta.total_seconds())\n minutes = seconds / 60.0\n hours = minutes / 60.0\n array_of_times.append(hours)\n delta = str(delta)\n fTime = str(fTime)\n tTime = str(tTime)\n myString = (fName + \"\\t\" + fTime + \"\\t\" + tName + \"\\t\" + tTime + \"\\t\" + delta + \"\\n\")\n f.write(myString)", "def test_get_filenames_in_path():\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir2 = os.path.join(tmpdir, \"tmp\")\n os.makedirs(tmpdir2, exist_ok=True)\n\n data = {\"A\": 1, \"B\": 2}\n json_file1 = os.path.join(tmpdir, \"a.json\")\n json_file2 = os.path.join(tmpdir2, \"a.json\")\n dump_data(data, json_file1)\n dump_data(data, json_file2)\n\n # These should not get included.\n toml_file1 = os.path.join(tmpdir, \"b.toml\")\n toml_file2 = os.path.join(tmpdir2, \"b.toml\")\n dump_data(data, toml_file1)\n dump_data(data, toml_file2)\n\n filenames = list(get_filenames_in_path(tmpdir, \"a.json\"))\n assert filenames == [json_file1, json_file2]", "def subtitlePath_and_subtitleFileData(self,file_path_list):\n subtile_file_data = [] \n for x in file_path_list:\n subtile_file_data.append((x, self.guessFileData(x)))\n return subtile_file_data", "def main(file1, file2, uniq1=False, uniq2=False, union=False, tab=False, col1=1, col2=1):\n delimiter = \"\\t\" if tab else \",\"\n\n idx1 = col1 - 1\n idx2 = col2 - 1\n\n # Figure out what the mode of operation is.\n show = ISECT\n show = UNIQ1 if uniq1 else show\n show = UNIQ2 if uniq2 else show\n show = UNION if union else show\n\n if not os.path.isfile(file1):\n print(f\"file not found: {file1}\")\n sys.exit(1)\n\n # Get a stream for each file\n stream1 = get_stream(file1)\n stream2 = get_stream(file2)\n\n # Process the file.\n process(stream1=stream1, 
stream2=stream2, delimiter=delimiter, idx1=idx1, idx2=idx2, show=show)", "def one_to_all(self, chain1, direc2, file2_list, norm_lngth=None):\n lngth_val, shrt_val, lng_val, avg_val = self.def_options(norm_lngth)\n one_to_all_list = []\n with open(file2_list, 'r') as fl:\n flines = fl.read().splitlines()\n for c2 in flines:\n c2_file = os.path.join(direc2, c2)\n res = self.default(chain1, c2_file, length=lngth_val, \\\n shrt=shrt_val, lng = lng_val, avg = avg_val)\n one_to_all_list.append(self.parse_res(res))\n \n return one_to_all_list", "def join(self, path, *paths):", "def compare_files(file1, file2):\n return filecmp.cmp(file1, file2)", "def compare_files(input_index_file, output_index_file ):\n \n # -------------\n # open the input index file for reading\n # -------------\n input_set = open_read_file(input_index_file)\n\n # -------------\n # open the output index file for reading\n # -------------\n output_set = open_read_file(output_index_file)\n\n # -------------\n # get the difference in the files where\n # the input_set is the larger set\n # -------------\n unproc_files = set_difference(output_set, input_set)\n #print unproc_files\n\n return unproc_files", "def diffch(dir1,dir2,outfile=None):\n for ff in sorted(os.listdir(dir1)):\n if re.search('.c$',ff) or re.search('.h$',ff):\n f1 = dir1 + ff\n f2 = dir2 + ff\n if outfile is None:\n print 'start diff ',f1,f2\n os.system('diff %s %s' % (f1,f2))\n print 'end diff ',f1,f2\n else:\n ofp = open(outfile,'a')\n ofp.write('start diff %s %s\\n' % (f1,f2))\n ofp.close()\n os.system('diff %s %s >> %s' % (f1,f2,outfile))\n ofp = open(outfile,'a')\n ofp.write('end diff %s %s\\n' % (f1,f2))\n ofp.close()", "def shared_words_from_filenames(filename1, filename2):\r\n\r\n \"\"\"\r\n filename1 = tokenize(text1)\r\n filename2 = tokenize(text2)\r\n\r\n list3 = set(filename1) & set(filename2)\r\n\r\n return list3\r\n\r\n \"\"\"\r\n with open(filename1, encoding=\"utf8\") as f1, open(filename2, encoding=\"utf8\") as f2:\r\n\r\n wordsFile1 = [];\r\n wordsFile2 = [];\r\n result = [];\r\n\r\n lines = [line.strip() for line in f1] # create a set of words from file 1\r\n for line in lines:\r\n tokenizedline = tokenize(line.replace('\\ufeff', ''));\r\n for word in tokenizedline:\r\n wordsFile1.append(word);\r\n\r\n lines = [line.strip() for line in f2] # create a set of words from file 1\r\n for line in lines:\r\n tokenizedline = tokenize(line.replace('\\ufeff', ''));\r\n for word in tokenizedline:\r\n wordsFile2.append(word);\r\n\r\n # now loop over each line of other file\r\n\r\n for word in wordsFile1:\r\n if word in wordsFile2 and word != ' ': # if word in File 1 is found in File 2 then print it\r\n result.append(word)\r\n\r\n return result", "def save_double_list(list1, list2, filename):\r\n the_file = open(filename, \"wb\")\r\n try:\r\n writer = csv.writer(the_file)\r\n if len(list1)!=len(list2):\r\n raise Exception(\"Saving a double list : The list have not the same length !\")\r\n for i in range(len(list1)):\r\n writer.writerow( (list1[i], list2[i]) ) \r\n finally:\r\n the_file.close()", "def test_differ_times_two_files(generate_differ_times_two_files):\n fname = generate_differ_times_two_files\n with pytest.raises(Exception):\n process_files([fname[0], fname[1]])", "def readFiles(trainFile, testFile):\r\n\r\n\t# Open both files and split into lines\r\n\twith open(trainFile) as f:\r\n\t\ttrainLines = f.read().splitlines()\r\n\r\n\twith open(testFile) as f:\r\n\t\ttestLines = f.read().splitlines()\r\n\r\n\t\t\r\n\t# Extract training data\r\n\tfor 
line in trainLines:\r\n\t\tline = line.split()\r\n\t\t\r\n\t\tid = line[0]\r\n\t\tclass_id = line[1]\r\n\t\twords = line[2:]\r\n\t\t\r\n\t\trow = [id, class_id, words]\r\n\t\t\r\n\t\ttrainingData.append(row)\r\n\t\t\r\n\t\t\r\n\t# Extract testing data\r\n\tfor line in testLines:\r\n\t\tline = line.split()\r\n\t\t\r\n\t\tid = line[0]\r\n\t\tclass_id = line[1]\r\n\t\twords = line[2:]\r\n\t\t\r\n\t\trow = [id, class_id, words]\r\n\t\t\r\n\t\ttestData.append(row)", "def test_process_two_filenames(generate_expected_two_files):\n # create local variables and run fixtures\n einfo = generate_expected_two_files\n expected = einfo['expected']\n fname = einfo['file_names']\n results = process_files([fname['stress'], fname['strain']])\n # compare the pifs\n A = results.properties[0].scalars\n B = expected['stress'].properties[0].scalars\n C = results.properties[1].scalars\n D = expected['strain'].properties[0].scalars\n assert np.array_equal(A, B), \\\n 'Results and expected pifs differ in stress values'\n assert np.array_equal(C, D), \\\n 'Results snd expected pifs differ in strain values'\n assert getattr( results, 'uid', None) is None, \\\n 'Result UID should be None'\n assert getattr(results, 'names', None) is None, \\\n 'Result should not be named'\n assert getattr(results, 'classifications', None) is None, \\\n 'Result should not have any classifications.'\n assert len(results.properties) == \\\n len(expected['stress'].properties) + \\\n len(expected['strain'].properties), \\\n 'The length of the result and expected properties lists do not match.'\n assert getattr(results, \"ids\", None) is None, \\\n 'Result ids should be None'\n assert getattr(results, 'source', None) is None, \\\n 'Result source should be None'\n assert getattr(results, 'quantity', None) is None, \\\n 'Result quantity should be None'\n assert getattr(results, 'preparation', None) is None,\\\n 'Result preparation should be None'\n assert getattr(results, \"subSystems\", None) is None, \\\n 'Results subSystem should be None'\n assert getattr(results, 'references', None) is None,\\\n 'Results references should be None'\n assert getattr(results, 'contacts', None) is None, \\\n 'Results contacts should be None'\n assert getattr(results, 'licenses', None) is None,\\\n 'Results licenses should be None'\n assert getattr(results,'tags', None) is None,\\\n 'Results tags should be None'", "def list_and_merge_files(paths):\n result = [list_files(path) for path in paths]\n Xs, ys = zip(*result)\n return np.hstack(Xs), np.hstack(ys)", "def read_lists(path_to_lists, taille,taille_sup):\n\n os.chdir(path_to_lists)\n\n heme = [i.strip() for i in open(\"heme.list\").readlines()]\n heme_sample = data_sample(heme,taille_sup)\n\n steroid = [i.strip() for i in open(\"steroid.list\").readlines()]\n steroid_sample = data_sample(steroid,len(steroid))\n\n nucleotide = [i.strip() for i in open(\"nucleotide.list\").readlines()]\n nucleotide_sample = data_sample(nucleotide,taille)\n\n control = [i.strip() for i in open(\"control.list\").readlines()]\n control_sample = data_sample(control,taille)\n\n #data_total = heme_sample+nucleotide_sample+control_sample+steroid_sample\n\n data_total = heme_sample+nucleotide_sample+control_sample\n return data_total,heme,steroid,nucleotide,control", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for 
line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def open_read_append_new_file(file1, file2):\n\n # open the first file in the read mode\n with open(file1) as fin:\n # read all lines into a list\n lst = fin.readlines()\n\n # reverse the list\n lst.reverse()\n\n # open second file for pending\n fout = open(file2, \"a\")\n\n # write reversed lines to a second file\n fout.writelines(lst)\n\n # close the second file\n fout.close()", "def load_data_and_labels(filename1, filename2):\n\n x_text = []\n fi = []\n\n nocnt = 0\n with open(filename1, \"r\", encoding='utf-8') as file_to_read, open(filename2, \"w+\", encoding='utf-8') as file_to_write:\n while True:\n lines = file_to_read.readline()\n if not lines:\n break\n pass\n # train文件处理开始\n (number, text, fine, law) = lines.split('\\t')\n # text = clean_str(text) # train文件处理结束\n\n # tmp1 = re.compile(PATTERN).findall(text)\n\n # if len(tmp1) > 1:\n # vwrite = tmp1[0]\n # file_to_write.write(vwrite + \"\\n\")\n #\n # tmp1 = re.compile(PATTERN).findall(text)\n\n if len(text) > 1000:\n file_to_write.write(text + \"\\n\")\n\n pass\n\n print(nocnt)\n return x_text, fi", "def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def create_test_input_files(input1, input2):\n random.shuffle(input1)\n random.shuffle(input2)\n filename1 = application.join_abs_path(EMPTY_TEST_DIR, 'file-1.gz')\n filename2 = application.join_abs_path(EMPTY_TEST_DIR, 'file-2.gz')\n\n with gzip.open(filename1, 'wb') as file1:\n file1.write('\\n'.join(input1))\n with gzip.open(filename2, 'wb') as file2:\n file2.write('\\n'.join(input2))", "def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files", "def chk_chng(src_flist,dst_flist):\n uc_flist = []\n c_flist = []\n for files in src_flist:\n if files in dst_flist:\n uc_flist.append(files)\n else:\n c_flist.append(files)\n return uc_flist,c_flist", "def equal_files_to_json(self, equal_files, path_dir_1, path_dir_2):\n equal_files_json = []\n for fl 
in equal_files:\n file_path_1 = os.path.join(path_dir_1, fl)\n file_path_2 = os.path.join(path_dir_2, fl)\n path_in = self.make_path_in(file_path_1, file_path_2)\n\n file_size_1 = os.path.getsize(file_path_1)\n file_size_2 = os.path.getsize(file_path_2)\n equal_files_json.append(\n self.cmp_files_to_json(\n path_in,\n self.get_type(file_path_1),\n True,\n True,\n file_size_1,\n file_size_2,\n True,\n None)\n )\n return equal_files_json", "def get_execution_files(path, platform_A, platform_B):\n list_files = os.listdir(path) # Get all the files in the path\n platform_A_files = set([\n f.replace(f'{platform_A}.json', \"\") for f in list_files\n if f.endswith(f'{platform_A}.json')\n ])\n platform_B_files = set([\n f.replace(f'{platform_B}.json', \"\") for f in list_files\n if f.endswith(f'{platform_B}.json')\n ])\n print(platform_A_files)\n print(platform_B_files)\n intersection = list(platform_A_files.intersection(platform_B_files))\n return [\n (\n load_json(e + f'{platform_A}.json', folder=path),\n load_json(e + f'{platform_B}.json', folder=path),\n e[:-1] # last character is the '_'\n )\n for e in intersection\n ]", "def test_get_filepaths(self):\n\n #setup\n get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)\n \n #when\n test1 = get_filepaths(\"./dir1\", \".csv\")\n\n #result\n assert len(test1) == 2", "def join_infile_path(*paths):\n # Join path components\n path = '/'.join(paths)\n # Correct double slashes, if any is present\n path = path.replace('//', '/')\n\n return path", "def read_data_files(filenames, datapath, ids=None):\n filenames = np.array(filenames) # make sure it's array\n if ids is None:\n ids = range(0, len(filenames))\n\n for i in [filenames[k] for k in ids]:\n yield str(open(datapath+i, 'r').read())", "def cmpfile(file_left, file_right):\n nobv.visual_comparefile(file_left, file_right)", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def test_time_not_in_two_files(generate_no_time_two_files):\n fname = generate_no_time_two_files\n with pytest.raises(Exception):\n process_files([fname[0], fname[1]])\n # process_files(['resources/simple_stress.json', 'resources/simple_strain.json'])", "def data_list_wdl_merge(data_list1:list, data_list2:list) -> list:\n list_size = len(data_list1)\n merged_data_list = []\n for i in range(list_size):\n merged_data_list.append(pd.concat([data_list1[i],data_list2[i]]))\n return merged_data_list", "def join_paths(path_1, path_2):\r\n a = lib_path.join(path_1, path_2)\r\n return a", "def __compare_files(self, filename1, filename2):\n self.assertTrue(os.path.isfile(filename1))\n self.assertTrue(os.path.isfile(filename2))\n self.assertEqual(os.path.getsize(filename1), os.path.getsize(filename2))\n with open(filename1, \"rb\") as f1:\n with open(filename2, \"rb\") as f2:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n self.assertEqual(f1.read(self.max_block_size), \\\n f2.read(self.max_block_size))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n self.assertEqual(f1.read(remaining), \\\n f2.read(remaining))", "def compare_tree(self):\n result = []\n \n pathA = os.path.join(self.testpath,'A')\n pathB = os.path.join(self.testpath,'B')\n\n filesA = [os.path.relpath(f,pathA) for f in self.tree(pathA)]\n filesB = [os.path.relpath(f,pathB) for f in self.tree(pathB)]\n\n filesAB = set(filesA).union(filesB)\n for fileAB in sorted(list(filesAB)):\n\n fileA = os.path.join(self.testpath,'A',fileAB)\n fileB = 
os.path.join(self.testpath,'B',fileAB)\n try:\n fileAtxt = open(fileA).read()\n except IOError:\n result.append( ('missing_inA',fileAB) )\n continue\n \n try:\n fileBtxt = open(fileB).read()\n except IOError:\n result.append( ('missing_inB',fileAB) )\n continue\n\n if not fileAtxt == fileBtxt:\n result.append( ('disagree',fileAB))\n \n return result", "def get_movie_data(files: list) -> list:\n pass", "def CompareFilenames(self, file1, file2):\n f1_segments = file1.split('/')\n f2_segments = file2.split('/')\n\n segment_ptr = 0\n while (segment_ptr < len(f1_segments) and\n segment_ptr < len(f2_segments) and\n f1_segments[segment_ptr] == f2_segments[segment_ptr]):\n segment_ptr += 1\n\n if len(f1_segments) == len(f2_segments):\n\n # we fell off the end, the paths much be the same\n if segment_ptr == len(f1_segments):\n return 0\n\n # we didn't fall of the end, compare the segments where they differ\n if f1_segments[segment_ptr] < f2_segments[segment_ptr]:\n return 1\n elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:\n return -1\n else:\n return 0\n\n # the number of segments differs, we either mismatched comparing\n # directories, or comparing a file to a directory\n else:\n\n # IF we were looking at the last segment of one of the paths,\n # the one with fewer segments is first because files come before\n # directories\n # ELSE we just need to compare directory names\n if (segment_ptr + 1 == len(f1_segments) or\n segment_ptr + 1 == len(f2_segments)):\n return len(f2_segments) - len(f1_segments)\n else:\n if f1_segments[segment_ptr] < f2_segments[segment_ptr]:\n return 1\n elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:\n return -1\n else:\n return 0", "def CompareFilenames(self, file1, file2):\n f1_segments = file1.split('/')\n f2_segments = file2.split('/')\n\n segment_ptr = 0\n while (segment_ptr < len(f1_segments) and\n segment_ptr < len(f2_segments) and\n f1_segments[segment_ptr] == f2_segments[segment_ptr]):\n segment_ptr += 1\n\n if len(f1_segments) == len(f2_segments):\n\n # we fell off the end, the paths much be the same\n if segment_ptr == len(f1_segments):\n return 0\n\n # we didn't fall of the end, compare the segments where they differ\n if f1_segments[segment_ptr] < f2_segments[segment_ptr]:\n return 1\n elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:\n return -1\n else:\n return 0\n\n # the number of segments differs, we either mismatched comparing\n # directories, or comparing a file to a directory\n else:\n\n # IF we were looking at the last segment of one of the paths,\n # the one with fewer segments is first because files come before\n # directories\n # ELSE we just need to compare directory names\n if (segment_ptr + 1 == len(f1_segments) or\n segment_ptr + 1 == len(f2_segments)):\n return len(f2_segments) - len(f1_segments)\n else:\n if f1_segments[segment_ptr] < f2_segments[segment_ptr]:\n return 1\n elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:\n return -1\n else:\n return 0", "def compare_files(self):\n\n first_backup_ids = set(self.first_source_data.keys())\n second_backup_ids = set(self.second_source_data.keys())\n\n for deleted_user_id in first_backup_ids.difference(second_backup_ids):\n self.changes[Constants.DELETED_USER]\\\n .append({\n Constants.ID: deleted_user_id,\n Constants.USER_TYPE: self.first_source_data[deleted_user_id][Constants.USER_TYPE]\n })\n\n for added_user_id in second_backup_ids.difference(first_backup_ids):\n self.changes[Constants.ADDED_USER]\\\n .append({\n Constants.ID: added_user_id,\n 
Constants.USER_TYPE: self.second_source_data[added_user_id][Constants.USER_TYPE]\n })\n\n for id in first_backup_ids.intersection(second_backup_ids):\n self.compare_id(id)", "def lines(a, b):\n\n same_lines = []\n\n # creating a list with all the lines in file1\n linesA = a.split(\"\\n\")\n linesA = [i.rstrip(\"\\r\") for i in linesA]\n\n # creating a list with all the lines in file2\n linesB = b.split(\"\\n\")\n linesB = [i.rstrip(\"\\r\") for i in linesB]\n\n for line in linesA:\n if line in linesB and line not in same_lines:\n same_lines.append(line)\n\n return same_lines", "def gather_data(instance):\n paths = [instance.get('file1'), instance.get('file2')]\n if Path(instance.get('file1')).parents[0].is_dir() is True and Path(\n instance.get('file2')).parents[0].is_dir() is True:\n files = [f for f in paths if os.path.isfile(f)]\n if len(files) == 0:\n raise Exception('The files you passed do not exist!')\n dfs = []\n for file in files:\n try:\n if file.endswith('.csv'):\n dfs.append(pd.read_csv(file))\n\n else:\n raise Exception('Please pass a file ending in .csv')\n\n except Exception as exc:\n formatted = \"Unable to locate files! Please ensure you have provided accurate file paths. {}\".format(\n repr(exc))\n raise Exception(formatted)\n\n return dfs, instance\n\n else:\n raise Exception('Please pass a valid file path.')", "def readdata(self, filepaths):\n pass", "def combine_data_main(data1,data2,lookup,foutput):\n\n # Get the maximum number of ortholog probesets we'll have to append\n max_orthologs = 0\n for probe_set_id in data1.keys():\n max_orthologs = max(max_orthologs,len(lookup(probe_set_id)))\n logging.debug(\"Max_orthologs = %d\" % max_orthologs)\n \n # Write header line\n line = [data1.header()]\n for i in range(1,max_orthologs+1):\n logging.debug(\"Adding header set #%d\" % i)\n for item in data2.header().split('\\t'): line.append(\"%s_%s\" % (item,i))\n foutput.write(\"%s\\n\" % '\\t'.join(line))\n\n # Append data\n for probe_set_id in data1.keys():\n # Build line to output to file\n line = [data1.fetch(probe_set_id)]\n # Get the corresponding ortholog probe set ID(s)\n logging.debug(\"Processing probe set ID %s\" % probe_set_id)\n for ortholog_probe_set_id in lookup(probe_set_id):\n ortholog_data = data2.fetch(ortholog_probe_set_id)\n if ortholog_data is not None:\n line.append(ortholog_data)\n # Write line to file\n foutput.write(\"%s\\n\" % '\\t'.join(line))", "def first_import(file, list):\n\n list.append(file)\n print(\"Path added to list\")", "def merge(fileHandle1, fileHandle2, outputFileHandle):\n line2 = fileHandle2.readline()\n for line1 in fileHandle1.readlines():\n while line2 != '' and line2 <= line1:\n outputFileHandle.write(line2)\n line2 = fileHandle2.readline()\n outputFileHandle.write(line1)\n while line2 != '':\n outputFileHandle.write(line2)\n line2 = fileHandle2.readline()", "def merge_csv_files(filelist):\n data = tables.DictList()\n for file_name in filelist:\n reader = csv.DictReader(open(file_name))\n data += list(reader)\n\n return data", "def Method_two(file1,file2):\n start = time.time()\n\n verified_element=set(file1).intersection(file2)\n\n print(len(verified_element))\n print('Duration: {} seconds'.format(time.time() - start))", "def redisperse_list(files,dw,w1,w2,key='spec'):\r\n input_list = ','.join(files)\r\n disp_files = [f.replace(key, key+'-disp') for f in files]\r\n output_disp_list = ','.join(disp_files)\r\n iraf.unlearn('dispcor')\r\n iraf.dispcor.input = input_list\r\n iraf.dispcor.output = output_disp_list\r\n # keep existing 
wavelength endpoints\r\n iraf.dispcor.dw = dw\r\n iraf.dispcor.w1 = w1\r\n iraf.dispcor.w2 = w2\r\n iraf.dispcor.flux = 'no'\r\n iraf.dispcor()\r\n # write text files\r\n for output in disp_files:\r\n iraf.wspectext(output, output.replace('fits', 'txt'), header=\"no\")\r\n\r\n return disp_files", "def test_stress_strain_both_files(generate_two_files_both_stress_strain):\n fname = generate_two_files_both_stress_strain\n with pytest.raises(Exception):\n process_files([fname[0],fname[1]])", "def _sift(self, fileslist, **arguments):\n\n def sort(reverse, arg, fileslist=fileslist):\n tdict = {fileslist[i][arg] : i for i in xrange(len(fileslist))}\n keys = tdict.keys()\n keys.sort(reverse=reverse)\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n return fileslist\n\n # for time\n if arguments.get('name'):\n reverse = None\n if arguments['name'] == 'reverse':\n reverse = True\n elif arguments['name'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'server_filename')\n\n # for size\n if arguments.get('size'):\n reverse = None\n if arguments['size'] == 'reverse':\n reverse = True\n elif arguments['size'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'size')\n\n # for size\n if arguments.get('time'):\n reverse = None\n if arguments['time'] == 'reverse':\n reverse = True\n elif arguments['time'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'local_mtime')\n\n # for head, tail, include, exclude\n head = args.head\n tail = args.tail\n include = args.include\n exclude = args.exclude\n if head or tail or include or exclude:\n tdict = {fileslist[i]['server_filename'] : i for i in xrange(len(fileslist))}\n keys1 = [i for i in tdict.keys() if i.lower().startswith(head.encode('utf8').lower())] \\\n if head else []\n keys2 = [i for i in tdict.keys() if i.lower().endswith(tail.decode('utf8').lower())] \\\n if tail else []\n keys3 = [i for i in tdict.keys() if re.search(include, i.encode('utf8'), flags=re.I)] \\\n if include else []\n keys4 = [i for i in tdict.keys() if not re.search(exclude, i.encode('utf8'), flags=re.I)] \\\n if exclude else []\n\n # intersection\n keys = [i for i in [keys1, keys2, keys3, keys4] if i]\n if len(keys) > 1:\n tkeys = keys[0]\n for i in keys:\n tkeys &= i\n keys = tkeys\n elif len(keys) == 1:\n keys = keys[0]\n elif len(keys) == 0:\n keys = []\n\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n\n dirs = [i for i in fileslist if i['isdir']]\n files = [i for i in fileslist if not i['isdir']]\n if arguments.get('desc') == 1:\n dirs.reverse()\n files.reverse()\n fileslist = dirs + files\n\n return fileslist", "def lines(a, b):\n # Turn split versions of both files into sets to remove duplicates\n # Split Lines used to automatically split at the end of a line. No need for \"\\n\" this way\n a1 = set(a.splitlines())\n b1 = set(b.splitlines())\n\n return a1 & b1", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' 
% file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def make_diff(self, file_path_1, file_path_2, path_in):\n hash_ = hash(path_in)\n\n with open(file_path_1) as file_1:\n with open(file_path_2) as file_2:\n d = difflib.Differ()\n\n diff = list(d.compare(file_1.readlines(), file_2.readlines()))\n _diff = []\n\n for i in range(len(diff)-1):\n if(diff[i][0] == '+' or diff[i][0] == '-'):\n if(diff[i].find('Copyright') == -1 and\n diff[i].find(\"generated by:\") == -1):\n _diff.append(diff[i])\n\n if _diff:\n _diff = ''.join(_diff)\n try:\n f = open('./diff/' + str(hash_) + '.diff', 'w')\n except:\n os.mkdir('./diff')\n f = open('./diff/' + str(hash_) + '.diff', 'w')\n f.write(_diff)\n f.close()\n return str(hash_)+'.diff'\n return None", "def test_upload_dir_contents_multiple_files(self):\n self._test_upload_dir_contents(filenames=['file1', 'file2'])", "def process_file_list(filePaths):\n processArgs = [dbd2asc_path, '-c', '/tmp']\n\n for filePath in filePaths:\n processArgs.append(filePath)\n\n stream, returncode = generate_stream(processArgs)\n\n # Fallback in case the cache is not available\n if returncode == 1:\n for filePath in filePaths:\n if not can_find_bd_index(filePath):\n raise KeyError(\n \"Cannot find data file index for: {}\".format(filePath)\n )\n\n # Reprocess the file list\n stream, returncode = generate_stream(processArgs)\n\n return stream", "def stage_input_file(workdir_path, files):\n if not isinstance(files, list):\n files = [files]\n\n for file_dict in files:\n location = urlparse(file_dict['location'])\n if 'basename' in file_dict:\n dest_path = os.path.join(workdir_path, file_dict['basename'])\n else:\n dest_path = os.path.join(workdir_path, os.path.basename(location.path))\n shutil.copy(location.path, dest_path)\n file_dict['path'] = dest_path\n\n for i, secondary_file in enumerate(file_dict.get('secondaryFiles', [])):\n stage_input_file(workdir_path, file_dict['secondaryFiles'][i])", "def cmp(f1, f2):\n with open(f1) as f1, open(f2) as f2:\n return f1.read() == f2.read()", "def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()", "def _file_comparator(a_file, b_file):\n if a_file[\"dir_name\"] == b_file[\"dir_name\"]:\n return _same_dir_file_comparator(a_file, b_file)\n\n return _diff_dir_file_comparator(a_file, b_file)", "def copy_files_individually(source_files, target_dir):\n\tfor source_file in source_files:\n\t\thead_tail = os.path.split(source_file)\n\t\tcopy_file(source_file, os.path.join(target_dir, head_tail[1]))", "def ingestData(paths):\n\tif isinstance(paths, list):\n\t\treturn imageFilePaths(paths)\n\telif isinstance(paths, str):\n\t\treturn 
imageFilePaths([paths])\n\telse:\n\t\treturn None", "def combine_files(files: list):\n\n headers = []\n rf_data = None\n fs = constants.FILE_FS\n\n # read all the RF files\n for filename in files:\n print(\"processing {}\".format(filename))\n header, rows = read_rf_file(filename)\n headers.append(header)\n new_rf_data = np.squeeze(rows[\"fft_bins\"])\n\n if rf_data is None:\n rf_data = new_rf_data\n else:\n rf_data = np.maximum(rf_data, new_rf_data)\n\n return headers, rf_data", "def test_merge_two_two():\n run_merge([1, 3], [2, 4], [1, 2, 3, 4])", "def test_collect_files():\n filelist = [\"test/a.ext\", \"test/b.asd\"]\n\n result = loader.collect_files(filelist, lambda x: x, lambda x: np.arange(0, 50))\n\n for k in filelist:\n assert np.array_equal(np.arange(0, 50), result[k])", "def read_list_file(path_file):\n with open(path_file,'r') as f_in:\n lines = f_in.readlines()\n lines = [x for x in lines if not (x.strip() == '' or x.strip()[0] == '#')]\n left_file_list = []\n right_file_list = []\n gt_file_list = []\n conf_file_list = []\n for l in lines:\n to_load = re.split(',|;',l.strip())\n left_file_list.append(to_load[0])\n right_file_list.append(to_load[1])\n if len(to_load)>2:\n gt_file_list.append(to_load[2])\n if len(to_load)>3:\n conf_file_list.append(to_load[3])\n return left_file_list,right_file_list,gt_file_list,conf_file_list", "def update_data(source_data: List[Dict], target_data: List[Dict]) -> List[Dict]:\n for i in source_data:\n for j in target_data:\n # Check if the IDs match\n if i.get('bongoId') == j.get('systemId') or \\\n i.get('id') == j.get('id'):\n # update title, artists and genres\n update_title(i, j)\n update_artists(i, j)\n update_genres(i, j)\n return target_data", "def get_data_in_paths(dfile, paths):\n for pth in paths:\n for f in os.listdir(pth):\n if f == dfile:\n return os.path.abspath(os.path.join(pth, dfile))", "def compare_files(fp1, fp2):\n\n line1 = fp1.readline()\n line2 = fp2.readline()\n\n while line1 and line2:\n if line1.startswith('#') and line2.startswith('#'):\n pass\n elif not line1 == line2:\n return False\n \n line1 = fp1.readline()\n line2 = fp2.readline()\n\n if line1 or line2:\n return False\n\n return True", "def p_and_l_from(files):\n if isinstance(files, str):\n files = [files]\n paths = []\n labels = []\n for file in files:\n print(f'read {file}')\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split(' ')\n paths.append(line[0])\n labels.append(int(line[1]))\n return [paths, labels]", "def diff_files_to_json(self, diff_files, path_dir_1, path_dir_2):\n diff_files_json = []\n for fl in diff_files:\n file_path_1 = os.path.join(path_dir_1, fl)\n file_path_2 = os.path.join(path_dir_2, fl)\n path_in = self.make_path_in(file_path_1, file_path_2)\n\n file_size_1 = os.path.getsize(file_path_1)\n file_size_2 = os.path.getsize(file_path_2)\n\n hash_ = self.make_diff(file_path_1, file_path_2, path_in)\n if hash_ is not None:\n diff_files_json.append(\n self.cmp_files_to_json(\n path_in,\n self.get_type(file_path_1),\n True,\n True,\n file_size_1,\n file_size_2,\n False,\n '['+hash_+'](./diff/'+hash_+')'))\n else:\n diff_files_json.append(\n self.cmp_files_to_json(\n path_in,\n self.get_type(file_path_1),\n True,\n True,\n file_size_1,\n file_size_2,\n True,\n hash_\n )\n )\n return diff_files_json", "def execute_processor(self):\n \n # pull in the parameter that has the file names we will process\n filename1 = self.param_dict['file1']\n filename2 = self.param_dict['file2']\n \n ''' these next 2 lines are 
the ones that I added to create a dummy row '''\n right_dummy = self.create_dummy_row( self.param_dict['dummy_rec_right'])\n left_dummy = self.create_dummy_row( self.param_dict['dummy_rec_left'])\n \n \n \n\n self.open_files(os.path.join(self.entry.working_directory,filename1), os.path.join(self.entry.working_directory,filename2))\n self.process_params()\n key_dict = self.create_key_match()\n file1_rec = self.read_file1(first=True)\n file2_rec = self.read_file2(first=True)\n \n file2_used = False\n \n # call the convenience method to setup the temp_csv file. This will also write the header row by default\n self.setup_csv_temp_writer(self.get_temp_csv_name(), self.get_header(self.file1_reader.fieldnames,self.file2_reader.fieldnames),preserve_order=True)\n \n while file1_rec:\n combined = {k:v for k,v in file1_rec.items()}\n if file2_rec and self.get_key(file2_rec,self.file2_key) == self.get_key(file1_rec,self.file1_key):\n # merge these two bad boys\n combined.update(self.get_values(file2_rec))\n file2_used = True\n ### WRITE ###\n self.write_temp_rec(combined)\n file1_rec = self.read_file1()\n elif file2_rec and self.get_key(file1_rec,self.file1_key) > self.get_key(file2_rec,self.file2_key):\n if not file2_used and left_dummy:\n ''' left side dummy \n now use the already created dummy_row to updated the dictionary '''\n left_dummy.update(self.get_values(file2_rec))\n key_fields = {key_dict[k]:file2_rec[k] for k in self.file2_key.split(\",\")}\n left_dummy.update(key_fields)\n self.write_temp_rec(left_dummy)\n left_dummy = self.create_dummy_row( self.param_dict['dummy_rec_left'])\n \n \n file2_rec = self.read_file2()\n file2_used = False\n \n elif not file2_rec or self.get_key(file1_rec,self.file1_key) < self.get_key(file2_rec,self.file2_key):\n ### WRITE REC WITH NO MATCH ###\n if self.keep_nomatch:\n ''' right side dummy\n now use the already created dummy_row to updated the dictionary '''\n if right_dummy:\n combined.update(self.get_values(right_dummy))\n self.write_temp_rec(combined)\n file1_rec = self.read_file1()\n else:\n raise Exception\n self.close_temp_csv()\n return 0", "def _separate_file_list( file_list, target_locus ):\n log.info(\"Parsing locus-specific subread FOFN\")\n target_fasta = None\n other_fasta = []\n print file_list, target_locus\n for filename in file_list:\n basename = filename.split('.')[0]\n locus = basename.split('_')[-1]\n if locus == target_locus and target_fasta is None:\n target_fasta = filename\n elif locus == target_locus:\n msg = 'Multiple files for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n else:\n other_fasta.append( filename )\n if target_fasta is None:\n msg = 'No fasta file for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n return ( target_fasta, other_fasta )", "def dtw_list_store(source, target, source_list, target_list):\n\n dtw_source = []\n dtw_target = []\n\n fs, source = scipy.io.wavfile.read(source)\n fs, target = scipy.io.wavfile.read(target)\n\n\n #source = psf.mfcc(source, 16000)\n #target = psf.mfcc(target, 16000)\n\n source, energy = psf.fbank(source, 16000)\n target, energy = psf.fbank(target, 16000)\n\n distance, path = fastdtw(source, target, dist=euclidean)\n\n for vertex in path:\n dtw_source.append(source[vertex[0],:])\n dtw_target.append(target[vertex[1],:])\n\n dtw_source = np.array(dtw_source)\n dtw_target = np.array(dtw_target)\n\n\n source_list.append(dtw_source)\n target_list.append(dtw_target)", "def test_several_paths(id1, id2, expected_path_ids):\n syn1 = 
germanet_data.get_synset_by_id(id1)\n syn2 = germanet_data.get_synset_by_id(id2)\n paths = syn1.shortest_path(syn2)\n assert len(paths) == len(expected_path_ids), \"the number of found paths doesn't macht the true number of paths\"\n for path in paths:\n path = [synset.id for synset in path]\n np.testing.assert_equal(path in expected_path_ids, True)", "def mergefiles(dfs=[], on=''):\n if len(dfs) == 1:\n return \"List only have one element.\"\n\n elif len(dfs) == 2:\n df1 = dfs[0]\n df2 = dfs[1]\n df = df1.merge(df2, on=on)\n return df\n\n # Merge the first and second datafranes into new dataframe\n df1 = dfs[0]\n df2 = dfs[1]\n df = dfs[0].merge(dfs[1], on=on)\n\n # Create new list with merged dataframe\n dfl = []\n dfl.append(df)\n\n # Join lists\n dfl = dfl + dfs[2:] \n dfm = mergefiles(dfl, on)\n return dfm", "def verify_files(data_folder: str, file_names: list) -> list:\n files = list()\n for file_name in file_names:\n file_to_be_read = data_folder / file_name\n if file_to_be_read.is_file():\n print(\"file {} found\".format(file_name))\n files.append(file_to_be_read)\n else:\n print(\"file {} not found!\".format(file_name))\n return files", "def compare(file1, file2):\n process = subprocess.Popen([PBWT_BIN, 'compare', file1, file2],\n stdout=subprocess.PIPE)\n process_results(str(process.communicate()[0]))", "def merge(files, dst):\n\n if len(files) == 0:\n raise click.BadArgumentUsage(\"Please provide both input files and destination file\")\n\n if len(files) == 1:\n path = files[0]\n base, pattern = os.path.split(path)\n with fs.open_fs(base) as ffs:\n files = [\"{}{}\".format(base, match.path) for match in ffs.glob(pattern)]\n\n las_files = [pylas.read(openbin_file(f)) for f in IncrementalBar(\"Reading files\").iter(files)]\n\n try:\n with click_spinner.spinner():\n click.echo(\"Merging\")\n merged = pylas.merge(las_files)\n click.echo(\"Writing\")\n merged.write(openbin_file(dst, mode='w'), do_compress=dst.endswith('.laz'))\n\n except Exception as e:\n click.echo(click.style(str(e), fg=\"red\"))\n raise click.Abort()", "def all_to_one(self, direc1, file1_list, chain2, norm_lngth=None):\n lngth_val, shrt_val, lng_val, avg_val = self.def_options(norm_lngth)\n all_to_one_list = []\n with open(file1_list, 'r') as fl:\n flines = fl.read().splitlines()\n for c1 in flines:\n c1_file = os.path.join(direc1, c1)\n res = self.default(chain2, c1_file, length=lngth_val, \\\n shrt=shrt_val, lng = lng_val, avg = avg_val)\n all_to_one_list.append(self.parse_res(res))\n \n return one_to_all_list", "def data_merge(path, dataset_name=\"processed_data\"):\n files = glob.glob(path+\"**//\"+dataset_name+\".json\")\n logger.info(\"Found {} files under the path {}\".format(len(files),path))\n final_data = []\n\n for file in files:\n assert dataset_name in file\n data = json.load(open(file,\"r\",encoding=\"utf-8\"))\n final_data += data\n\n data_analysis(final_data)\n final_data = json.dumps(final_data,indent=4)\n new_file = open(path + \"//merged_data.json\", \"w+\", encoding=\"UTF-8\")\n new_file.writelines(final_data)", "def joinPath(path, *args):" ]
[ "0.6610108", "0.6356825", "0.635545", "0.6338313", "0.62560904", "0.61758494", "0.6088548", "0.6032201", "0.598844", "0.5927247", "0.58987236", "0.5861115", "0.5815601", "0.5770374", "0.57467705", "0.57203543", "0.57123935", "0.57011217", "0.56484956", "0.56306076", "0.56077754", "0.5605002", "0.55955654", "0.5562399", "0.5542114", "0.5504309", "0.54673487", "0.54642123", "0.54440105", "0.5404487", "0.5396164", "0.53940946", "0.53872734", "0.5366787", "0.53652614", "0.53567857", "0.5343271", "0.5342848", "0.5338637", "0.53265405", "0.5325122", "0.5318446", "0.5317543", "0.5311266", "0.5308826", "0.53022444", "0.52969545", "0.5289178", "0.52788377", "0.52738494", "0.5262917", "0.5260323", "0.5257001", "0.52479905", "0.5246923", "0.52422523", "0.5231758", "0.5231758", "0.5231582", "0.52241", "0.52184623", "0.5217668", "0.52095205", "0.5204199", "0.51977986", "0.5183491", "0.517847", "0.5177855", "0.51701695", "0.51665413", "0.51656073", "0.5163278", "0.5142931", "0.5139436", "0.5136646", "0.51332754", "0.5127093", "0.51246315", "0.51207316", "0.51164144", "0.5110476", "0.5101864", "0.5093796", "0.50876653", "0.5087575", "0.5085778", "0.5081108", "0.50792783", "0.50762326", "0.50664526", "0.5064731", "0.5055145", "0.5048964", "0.5044219", "0.5036421", "0.50334406", "0.5031212", "0.50309783", "0.50301784", "0.50229746", "0.5020877" ]
0.0
-1
Writes the first sequence (fasta) in a list and the remaining sequences to two temp files
def _input_as_lines(self, data):
    # data is a list of FASTA lines: the first record (header +
    # sequence, data[:2]) goes to one temp file and the remaining
    # records (data[2:]) to a second
    self._input_filename = []
    for i in range(2):
        filename = self.getTmpFilename(self.WorkingDir)
        self._input_filename.append(filename)
        data_file = open(filename, 'w')
        if i == 0:
            data_to_file = '\n'.join(data[:2])
            tmp1 = filename
        else:
            data_to_file = '\n'.join(data[2:])
            tmp2 = filename
        data_file.write(data_to_file)
        data_file.close()
    # join the two paths only after both files exist; doing this inside
    # the loop referenced tmp2 before it was assigned on the first pass
    inputFiles = ' '.join([tmp1, tmp2])
    return inputFiles
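For context, a minimal self-contained sketch of the same technique using only Python's standard tempfile module. The name split_fasta_lines_to_temp_files is illustrative, and NamedTemporaryFile is assumed here as a stand-in for the class helpers getTmpFilename and WorkingDir used above:

import tempfile

def split_fasta_lines_to_temp_files(data, working_dir=None):
    # data is a list of FASTA lines: data[:2] holds the first record
    # (header + sequence) and data[2:] holds the remaining records
    paths = []
    for chunk in (data[:2], data[2:]):
        tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.fasta',
                                          dir=working_dir, delete=False)
        tmp.write('\n'.join(chunk))
        tmp.close()
        paths.append(tmp.name)
    # return both paths joined by a space, as the method above does
    return ' '.join(paths)

delete=False keeps both files on disk after the handles close, which mirrors the original method's behaviour of leaving the temp files in place for a downstream tool.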
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_fasta_files(fastas_paths, out_file):\n with open(out_file, 'w') as out:\n for filename in fastas_paths:\n for seq_record in SeqIO.parse(filename, \"fasta\"):\n out.write('>' + str(seq_record.id) + '\\n' + str(seq_record.seq) + '\\n')", "def test_write_seqs_to_fasta(self):\r\n fd, output_fp = mkstemp(\r\n prefix=\"qiime_util_write_seqs_to_fasta_test\",\r\n suffix='.fasta')\r\n close(fd)\r\n self.files_to_remove.append(output_fp)\r\n seqs = [('s1', 'ACCGGTTGG'), ('s2', 'CCTTGG'),\r\n ('S4 some comment string', 'A')]\r\n exp = \">s1\\nACCGGTTGG\\n>s2\\nCCTTGG\\n>S4 some comment string\\nA\\n\"\r\n # works in write mode\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # calling again in write mode overwrites original file\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # works in append mode\r\n exp2 = exp + exp\r\n write_seqs_to_fasta(output_fp, seqs, 'a')\r\n self.assertEqual(open(output_fp).read(), exp2)", "def test_write_combined_fasta(self):\r\n\r\n mapping_data = {'%s' % basename(self.fasta1_fp): 'Sample1',\r\n '%s' % basename(self.fasta2_fp): 'Sample2',\r\n '%s' % basename(self.fasta3_fp): 'Sample3'\r\n }\r\n\r\n fasta_fps = [self.fasta2_fp, self.fasta3_fp, self.fasta1_fp]\r\n\r\n write_combined_fasta(mapping_data, fasta_fps, self.output_dir,\r\n counter=100)\r\n\r\n output_fp = open(join(self.output_dir, \"combined_seqs.fna\"), \"U\")\r\n output_lines = [line.strip() for line in output_fp]\r\n\r\n expected_output_lines = ['>Sample2_100 label3 ZZZ', 'AACGYAACGAGA',\r\n '>Sample2_101 label4', 'ACAGAGAGAGGGGAGA',\r\n '>Sample3_102 label5 ;LKJ', 'ACAGGGATTTTTAT',\r\n '>Sample1_103 label1 XXX', 'ACAGATTACGA',\r\n '>Sample1_104 label2 YYY', 'ACATAAAATAGCCGGAG'\r\n ]\r\n\r\n self.assertEqual(output_lines, expected_output_lines)", "def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def write(afile, seqs): \n for s in seqs :\n writeseq(afile, s)", "def write_coord_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n lis = []\n with open(filepath, 'r') as file:\n for line in file:\n if line[:4] == 'ATOM':\n line_split = line.split()\n lis.append(line_split[3:4])\n choice1 = input('Enter name for the output file: ')\n 
filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as myfile:\n for i in lis:\n myfile.writelines(i)\n print('Done!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def writeFastaFile(filename,sequences):\n fhw=open(filename,\"w\")\n for id in sequences:\n fhw.write(\">\"+id+\"\\n\"+sequences[id]+\"\\n\")\n fhw.close()", "def generate_fasta(sequences, fasta_path):\n\n with open(fasta_path, 'w+') as f:\n for i in range(len(sequences)):\n f.write('>seq '+str(i))\n f.write('\\n')\n f.write(sequences[i])\n f.write('\\n')", "def test_assign_seqs_two_fastas(self):\r\n\r\n # Handles two fasta files alone\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors,\r\n self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = []\r\n #file_data['mapping_file'] = self.valid_mapping_data_golay_upper\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 2, 'AACTCGTCGATG,s1': 2,\r\n 'AGCAGCACTTGT,s2': 2}\r\n expected_bc_freqs = {'AACTCGTCGATG': 2, 'AGCAGCACTTGT': 2,\r\n 'ACCGCAGAGTCA': 2}\r\n expected_seq_counts = 6\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is 
header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()", "def _writeOneFASTA(sequence, filehandle):\n filehandle.write(\">\" + sequence.getName()+\"\\n\")\n data = sequence.getSequence()\n lines = ( sequence.getLen() - 1) / 60 + 1\n for i in range(lines):\n #note: python lets us get the last line (var length) free\n #lineofseq = data[i*60 : (i+1)*60] + \"\\n\"\n lineofseq = \"\".join(data[i*60 : (i+1)*60]) + \"\\n\"\n filehandle.write(lineofseq)", "def write_seqs_to_fasta(fp, seqs, write_mode='w'):\r\n f = open(fp, write_mode)\r\n for s in seqs:\r\n f.write('>%s\\n%s\\n' % (s))\r\n f.close()", "def createSequenceFile(sequences, tmpDir, filename='seq.fa'):\n seqfile = os.path.join(tmpDir, filename)\n with open(seqfile, 'w') as f:\n for name, sequence in sequences.iteritems():\n f.write(\">{}\\n{}\\n\".format(name, sequence))\n subprocess.call(\"pyfasta flatten {}\".format(seqfile), shell=True)\n return seqfile", "def test_split_fasta_on_sample_ids_to_files(self):\r\n temp_output_dir = mkdtemp()\r\n self.dirs_to_remove.append(temp_output_dir)\r\n\r\n split_fasta_on_sample_ids_to_files(\r\n parse_fasta(self.fasta2),\r\n output_dir=temp_output_dir,\r\n per_sample_buffer_size=2)\r\n self.files_to_remove.extend(glob('%s/*fasta' % temp_output_dir))\r\n\r\n # confirm that all files are as expected\r\n self.assertEqual(open('%s/Samp1.fasta' % temp_output_dir).read(),\r\n \">Samp1_42\\nACCGGTT\\n>Samp1_43 some comme_nt\\nAACCG\\n>Samp1_44\\nA\\n\")\r\n self.assertEqual(open('%s/s2_a.fasta' % temp_output_dir).read(),\r\n \">s2_a_50\\nGGGCCC\\n\")\r\n self.assertEqual(open('%s/s3.fasta' % temp_output_dir).read(),\r\n \">s3_25\\nAAACCC\\n\")\r\n # confirm number of files is as expected\r\n self.assertEqual(len(glob('%s/*' % temp_output_dir)), 3)", "def write_sequence(list):\n pass", "def write_combined_fasta(fasta_name_to_sample_id,\r\n fasta_files,\r\n output_dir=\".\",\r\n counter=0):\r\n\r\n combined_file_out = 
open(join(output_dir + \"/\", \"combined_seqs.fna\"), \"w\")\r\n\r\n for curr_fasta in fasta_files:\r\n for label, seq in parse_fasta(open(curr_fasta, \"U\")):\r\n combined_file_out.write(\">%s_%d %s\\n\" %\r\n (fasta_name_to_sample_id[basename(curr_fasta)], counter, label))\r\n combined_file_out.write(\"%s\\n\" % seq)\r\n counter += 1", "def write_fasta(sequences_hash, output_fasta, concatenate_duplicates=True):\n with open(output_fasta, \"w+\") as fasta_object:\n for sequence in sequences_hash:\n if concatenate_duplicates:\n sequence_id = \"__\".join(sequences_hash[sequence])\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))\n else:\n sequence_id = sequence\n sequence = sequences_hash[sequence_id][0]\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))", "def test_write_Fasta_from_name_seqs_pairs(self):\r\n\r\n seqs = [('1', \"AAA\"), ('2', \"CCCCC\"), ('3', \"GGGG\")]\r\n\r\n # None fh raises Error\r\n self.assertRaises(\r\n ValueError,\r\n write_Fasta_from_name_seq_pairs,\r\n seqs,\r\n None)\r\n\r\n fd, tmp_filename = mkstemp(prefix=\"test_write_Fasta\",\r\n suffix=\".fna\")\r\n close(fd)\r\n fh = open(tmp_filename, \"w\")\r\n write_Fasta_from_name_seq_pairs(seqs, fh)\r\n fh.close()\r\n actual_seqs = list(parse_fasta(open(tmp_filename, \"U\")))\r\n remove(tmp_filename)\r\n\r\n self.assertEqual(actual_seqs, seqs)", "def test_sequences_to_file(self):\r\n\r\n fd, self.seq_test_fp = mkstemp(prefix='ExcludeByBlastTests_',\r\n suffix='.fasta')\r\n close(fd)\r\n self._paths_to_clean_up.append(self.seq_test_fp)\r\n\r\n ids = [\"bth:BT_0001\", \"hsa:8355\"]\r\n seqs = seqs_from_file(ids, open(self.query_fp).readlines())\r\n sequences_to_file(seqs, self.seq_test_fp)\r\n\r\n self.assertEqual(open(self.seq_test_fp).readlines(),\r\n open(self.query_fp).readlines())", "def writeSeqSitesToFiles(path, filenames, seqCharSitesDicList):\n ## filenames is a list of file names, get the number of total files\n nFiles = len(filenames)\n if not os.path.exists(path):\n os.makedirs(path)\n for i in range(0, nFiles):\n filepath = os.path.join(path, filenames[i])\n align_file = open(filepath, \"w\")\n sequenceSet = []\n sequenceSet.append(''.join(seqCharSitesDicList[i][\"t1\"]))\n sequenceSet.append(''.join(seqCharSitesDicList[i][\"t2\"]))\n records =[]\n for (index,seq) in enumerate(sequenceSet):\n records.append(SeqRecord(Seq(seq, IUPAC.unambiguous_dna), id= (\"t\"+str(index+1)), description=''))\n SeqIO.write(records, open(os.path.join(path, filenames[i]), \"w\"), \"fasta\") \n align_file.close()", "def remove_duplicates(file, number_of_fastas, path, output_name):\n\n path_to_pbds = path + 'Modeling/cleaned_template_pdbs/'\n path_to_fastas = path + 'Modeling/cleaned_template_fastas/'\n path_to_alignnment = path + 'Modeling/fasta_alns_and_identities/' + file\n fastas = parse_multifasta_file(path_to_alignnment, number_of_fastas)\n uniq_fastas = []\n with open(output_name, \"w\") as f:\n for i in range(number_of_fastas):\n name, seq = next(fastas)\n if seq not in uniq_fastas:\n uniq_fastas.append(seq)\n f.write('>' + name + '\\n')\n f.write(seq + '\\n')\n else:\n os.remove(path_to_pbds + name + '.pdb')\n os.remove(path_to_fastas + name + '.fasta')\n shutil.move(output_name, path + 'Modeling/fasta_alns_and_identities/')\n return len(uniq_fastas)", "def mafft_multiple_alignment(path, id_protein, output_name):\n\n path_to_templates = path + 'Modeling/cleaned_template_fastas/'\n path_to_target = path + id_protein + '.fasta'\n with open('fastas_for_mafft', 'w') as fastas:\n\n # write 
target fasta in joined file\n\n target = open(path_to_target)\n for line in target:\n fastas.write(line)\n fastas.write(line)\n target.close()\n\n # write templates fastas in joined file\n\n number_of_fastas = 1 # 1 is for target\n templates = next(os.walk(path_to_templates))[2]\n print(templates)\n for i in templates:\n number_of_fastas += 1\n with open(path_to_templates + i) as template:\n for line in template:\n fastas.write(line)\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/'\n os.system('mafft --localpair --maxiterate 1000 fastas_for_mafft > ' + path_to_alignment + output_name)\n # os.remove('fastas_for_mafft')\n return number_of_fastas", "def test_write_trunc_fasta(self):\r\n\r\n seq_order = ['seq1', 'seq2', 'seq3']\r\n\r\n seqs = {'seq1': 'ATCG', 'seq3': 'ACCC', 'seq2': 'GGACC'}\r\n\r\n output_dir = '/tmp/truncate_fasta_qual_test/'\r\n\r\n create_dir(output_dir)\r\n\r\n fasta_out_fp = output_dir + 'seqs_filtered.fna'\r\n\r\n write_trunc_fasta(seqs, fasta_out_fp, seq_order)\r\n\r\n expected_seqs = ['>seq1', 'ATCG', '>seq2', 'GGACC', '>seq3', 'ACCC']\r\n\r\n actual_fasta = open(fasta_out_fp, \"U\")\r\n\r\n actual_fasta = [line.strip() for line in actual_fasta]\r\n\r\n self.assertEqual(actual_fasta, expected_seqs)", "def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):\n # ensure that the pdb_names and pdb_sequences lists are the same length\n if len(pdb_names) != len(pdb_sequences):\n return False\n\n # add .txt to the filename, if needed\n if not filename.endswith(\".txt\"):\n filename += \".txt\"\n\n # write out the fasta file\n with open(filename, 'w') as fh:\n for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):\n fh.write(\">%s\\n%s\\n\" % (pdb_name, pdb_seq))", "def writeOutFileBadSeqRecord(badSeqList, outFileName):\n with gzip.open(outFileName, 'wb') as out_file:\n \tfor seqRecord in badSeqList:\n out_file.write(\"\\t\".join(seqRecord) + \"\\n\")", "def setUp(self):\n self.temp_dir = tempfile.mkdtemp()\n\n self.seqs1 = ('>1\\n'\n 'ACUGCUAGCUAGUAGCGUACGUA\\n'\n '>2\\n'\n 'GCUACGUAGCUAC\\n'\n '>3\\n'\n 'GCGGCUAUUAGAUCGUA\\n')\n self.seqs1_fp = join(self.temp_dir, 'seq1.fa')\n with open(self.seqs1_fp, 'w') as f:\n f.write(self.seqs1)\n self.seqs1_aln = ('>1\\n---acugcuagcuaguagcguacgua\\n'\n '>2\\n------gcuacguagcuac-------\\n'\n '>3\\ngcggcuauuagaucgua---------\\n')\n self.seqs1_aln_fp = join(self.temp_dir, 'seq1_aln.fa')\n with open(self.seqs1_aln_fp, 'w') as f:\n f.write(self.seqs1_aln)\n\n self.seqs2 = ('>a\\nUAGGCUCUGAUAUAAUAGCUCUC\\n'\n '>b\\nUAUCGCUUCGACGAUUCUCUGAUAGAGA\\n'\n '>c\\nUGACUACGCAU\\n')\n self.seqs2_fp = join(self.temp_dir, 'seq2.fa')\n with open(self.seqs2_fp, 'w') as f:\n f.write(self.seqs2)\n\n self.add_seqs_aligned = (\">_seed_1\\n\"\n \"----------acugcuagcuaguagcguacgua\\n\"\n \">_seed_2\\n\"\n \"-------------gcuacguagcuac-------\\n\"\n \">_seed_3\\n\"\n \"-------gcggcuauuagaucgua---------\\n\"\n \">a\\n\"\n \"-------uaggcucugauauaauagcucuc---\\n\"\n \">b\\n\"\n \"uaucgcuucgacgauucucugauagaga-----\\n\"\n \">c\\n\"\n \"-------------------ugacuacgcau---\\n\")\n\n self.align1 = (\">seq_0\\nACUGCUAGCUAGUAGCGUACGUA\\n\"\n \">seq_1\\nGCUACGUAGCUAC----------\\n\"\n \">seq_2\\nGCGGCUAUUAGAU------CGUA\\n\")\n self.align1_fp = join(self.temp_dir, 'align1.fa')\n with open(self.align1_fp, 'w') as f:\n f.write(self.align1)\n self.align2 = (\">a\\nUAGGCUCUGAUAUAAUAGCUCUC---------\\n\"\n \">b\\nUA----UCGCUUCGACGAUUCUCUGAUAGAGA\\n\"\n \">c\\nUG------------ACUACGCAU---------\\n\")\n self.align2_fp = join(self.temp_dir, 'align2.fa')\n 
with open(self.align2_fp, 'w') as f:\n f.write(self.align2)\n self.align_two_align = (\">seq_0\\n\"\n \"--------------acugcuagcuaguagcguacgua\\n\"\n \">seq_1\\n\"\n \"--------------gcuacguagcuac----------\\n\"\n \">seq_2\\n\"\n \"--------------gcggcuauuagau------cgua\\n\"\n \">a\\n\"\n \"uaggcucugauauaauagcucuc--------------\\n\"\n \">b\\n\"\n \"ua----ucgcuucgacgauucucugauagaga-----\\n\"\n \">c\\n\"\n \"ug------------acuacgcau--------------\\n\")", "def write_output(output,fasta,CDR1_pos,CDR2_pos):\n # fasta file is the igblast input file\n with open(output, 'w') as f:\n header = \"\\t\".join(['Name', 'CDRL1_kabat_AA', 'CDRL2_kabat_AA'])\n f.write(header + '\\n')\n for record in SeqIO.parse(fasta, \"fasta\"):\n ID = str(record.id)\n seq = str(record.seq)\n CDR1_aa=''\n CDR2_aa = ''\n CDR1_index = CDR1_pos[ID]\n CDR2_index = CDR2_pos[ID]\n if CDR1_index != []:\n CDR1_start, CDR1_end = fix_aa_pos((int(CDR1_index[0]) - 1), int(CDR1_index[1]))\n CDR1_nuc = seq[CDR1_start:CDR1_end]\n CDR1_aa = translation(CDR1_nuc)\n if CDR2_index != []:\n CDR2_start, CDR2_end = fix_aa_pos((int(CDR2_index[0]) - 1), int(CDR2_index[1]))\n CDR2_nuc = seq[CDR2_start:CDR2_end]\n CDR2_aa = translation(CDR2_nuc)\n f.write(\"\\t\".join([ID, CDR1_aa, CDR2_aa]) + '\\n')", "def one_file(file_list, file_path, aa, n, idx=None,raa=None):\r\n \r\n if os.path.isdir(file_path):\r\n file_name = f'{idx}_{n}n.csv'\r\n file_path = os.path.join(file_path, file_name)\r\n elif os.path.isfile(file_path):\r\n file_path = file_path\r\n with open(file_path, 'w') as handle:\r\n h = csv.writer(handle)\r\n for idx, file in enumerate(file_list):\r\n f = open(file, 'r')\r\n seq = read_fasta(f)\r\n simple_seq = reduce(seq, aa, raa)\r\n if not raa:\r\n raa = [i[0] for i in aa]\r\n base_aac = seq_aac(simple_seq, raa, n)\r\n for a in base_aac:\r\n line0 = [v for v in a[1]]\r\n line1 = [idx] + line0\r\n h.writerow(line1)\r\n f.close()", "def test_assign_seqs_fasta_only(self):\r\n\r\n # Initial test for single fasta file alone.\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = []\r\n #file_data['mapping_file'] = self.valid_mapping_data_golay_upper\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = \">s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n\"\r\n\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 1, 'AACTCGTCGATG,s1': 1,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'AACTCGTCGATG': 1, 'AGCAGCACTTGT': 1,\r\n 'ACCGCAGAGTCA': 1}\r\n expected_seq_counts = 3\r\n 
expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def zip_files(file_list, output_path):\n bname = os.path.basename # for efficiency\n with zipfile.ZipFile(output_path, mode='w') as zf:\n # adding all fasta files\n for file_name in file_list:\n zf.write(file_name, bname(file_name))\n return output_path", "def write_trunc_fasta(trunc_fasta_seqs,\r\n fasta_out_fp,\r\n seq_order):\r\n\r\n fasta_out = open(fasta_out_fp, \"w\")\r\n\r\n for label in seq_order:\r\n trunc_label = label.split()[0].strip()\r\n fasta_out.write(\">%s\\n%s\\n\" % (label, trunc_fasta_seqs[trunc_label]))", "def write_sequence(self):\n\n staves = self.get_sequence()\n\n with open(self.output_file, 'w') as out_file:\n\n print()\n out_file.write('')\n for num, staff in enumerate(staves):\n #out_file.write(('Sequence staff # ' + str(num) + '\\n' + staff + '\\n'))\n out_file.write((staff + '\\n'))\n print('Sequence staff #', num)\n print(staff,'\\n')\n out_file.write('')\n print()\n\n out_file.close()", "def main():\r\n\timport sys\r\n\r\n\tlistofSequences = FastAreader(sys.stdin).readFasta() \r\n\tPAMSequences = PAMfinder(listofSequences).classController() # Calls on controller class to return desired models.\r\n\tf = open('Guide Sequences.txt','w') \r\n\tfor i in range(len(PAMSequences[0])):\r\n\t\tf.write(PAMSequences[0][i]) # Prints the header sequence into the file.\r\n\t\tf.write('\\n') \r\n\t\tprint(PAMSequences[0][i]) \r\n\t\tfor j in range(len(PAMSequences[1][i])): \r\n\t\t\tif j == 0: \r\n\t\t\t\tf.write(\"Forward Strand PAM Sites:\") \r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Forward Strand PAM Sites:\") \r\n\t\t\tprint(PAMSequences[1][i][j]) # Prints the forward sequences\r\n\t\t\ty = str(PAMSequences[1][i][j]) # Changes from int to string characters.\r\n\t\t\tx = ''.join(y) # Joining all the string values so we can print to file.\r\n\t\t\tf.write(x) # Write the joined forward sequences to the file.\r\n\t\t\tf.write('\\n')\r\n\t\tfor k in range(len(PAMSequences[2][i])): # For reverse sequences, and follows same logic as forward. \r\n\t\t\tif k == 0:\r\n\t\t\t\tf.write(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\tprint(PAMSequences[2][i][k]) # Prints the reverse sequences with the corresponding positions. \r\n\t\t\ta = str(PAMSequences[2][i][k]) # Changes the integer to string characters, allowing for the values to join.\r\n\t\t\tb = ''.join(a)\r\n\t\t\tf.write(b) # Write all of the reverse sequences onto the text file with their positions. 
\r\n\t\t\tf.write('\\n')\r\n\tf.close() # Close the file.\r", "def write_Fasta_from_name_seq_pairs(name_seqs, fh):\r\n if fh is None:\r\n raise ValueError(\"Need open file handle to write to.\")\r\n\r\n for (name, seq) in name_seqs:\r\n fh.write(\"%s\\n\" % BiologicalSequence(seq, id=name).to_fasta())", "def make_fasta(pair, filename, id):\n \n fname = filename + \"-R1.fasta\"\n with open(fname,\"w\") as r1:\n r1.write(\">\" + id + \"\\n\")\n r1.write(pair[0])\n r1.write(\"\\n\")\n \n fname = filename + \"-R2.fasta\"\n with open(fname,\"w\") as r2:\n r2.write(\">\" + id + \"\\n\")\n r2.write(pair[1])\n r2.write(\"\\n\")", "def test_fasta_one_sequence(self):\n record = list(SeqIO.parse(\"Registry/seqs.fasta\", \"fasta\"))[0]\n input_file = \"seq.fasta\"\n with open(input_file, \"w\") as handle:\n SeqIO.write(record, handle, \"fasta\")\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n self.add_file_to_clean(input_file)\n self.standard_test_procedure(cline)", "def test_split_fasta_equal_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 1, filename_prefix)\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(3)]\r\n\r\n self.assertEqual(actual, expected)\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def writeFASTA(sequence, filename):\n fh = open(filename, \"w\")\n if isinstance(sequence, Sequence):\n _writeOneFASTA(sequence, fh)\n else:\n for seq in sequence:\n if isinstance(seq, Sequence):\n _writeOneFASTA(seq, fh)\n else:\n print(\"Warning: could not write \" + seq.getName() + \" (ignored).\", file=sys.stderr)\n fh.flush()\n fh.close()", "def create_verified_fasta(listOfFile, PROTEIN_FUNCTION, data_fasta, info_dat):\n\n\tprint \"\\n#################\"\n\tprint \"# Verified Fasta\"\n\tprint \"#################\\n\"\n\n\tlist_handle = [open(my_file, 'w') for my_file in listOfFile]\n\n\tinfo_extract = np.loadtxt(info_dat, dtype=\"string\", delimiter=\"\\t\")\n\n\tprogression=1\n\n\tseqiter = SeqIO.parse(data_fasta, \"fasta\")\n\n\tfor seq in seqiter :\n\t\tif seq.id in info_extract[:,0] :\n\n\t\t\tsys.stdout.write(\"{:.2f}% : {}/{} sequences wanted found\\r\".format(progression/float(info_extract.shape[0])*100, progression,info_extract.shape[0]))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tposition = info_extract[:,0].tolist().index(seq.id)\n\n\t\t\tif info_extract[position][1].split(\"_\")[0] in ['T2SS','T4P', 'Tad']:\n\n\t\t\t\tif info_extract[position][1] in PROTEIN_FUNCTION :\n\t\t\t\t\twriting_file = re.search('[a-zA-Z0-9/_]+'+PROTEIN_FUNCTION[info_extract[position][1]]+'\\.fasta', \"\\t\".join(listOfFile)).group(0)\n\n\t\t\t\t\tseq.name = info_extract[position][3]+\"_V_\"+\"_\".join(info_extract[position][1].split(\"_\")[1:])\n\t\t\t\t\tseq.id = seq.name\n\t\t\t\t\tseq.description = ''\n\n\t\t\t\t\tSeqIO.write(seq, list_handle[listOfFile.index(writing_file)], \"fasta\")\n\n\t\t\telse :\n\t\t\t\tnew_name = info_extract[position][2]+\"_\"+info_extract[position][1]\n\n\t\t\t\tif new_name in PROTEIN_FUNCTION :\n\t\t\t\t\twriting_file = 
re.search('[/a-zA-Z0-9_]*'+PROTEIN_FUNCTION[new_name]+'\\.fasta', \"\\t\".join(listOfFile)).group(0)\n\n\t\t\t\t\tseq.name = info_extract[position][3]+\"_V_\"+info_extract[position][1]\n\t\t\t\t\tseq.id = seq.name\n\t\t\t\t\tseq.description = ''\n\n\t\t\t\t\tSeqIO.write(seq, list_handle[listOfFile.index(writing_file)], \"fasta\")\n\n\tprint\n\tprint \"Done!\"\n\treturn", "def setUp(self):\r\n\r\n fd, self.sample_fasta_fp = mkstemp(prefix=\"sample_fasta_\",\r\n suffix=\".fna\")\r\n close(fd)\r\n seq_file = open(self.sample_fasta_fp, 'w')\r\n seq_file.write(sample_fasta_file)\r\n seq_file.close()\r\n\r\n fd, self.sample_fasta_invalid_fp = mkstemp(prefix=\"sample_fasta_\",\r\n suffix=\".fna\")\r\n close(fd)\r\n seq_file = open(self.sample_fasta_invalid_fp, 'w')\r\n seq_file.write(sample_fasta_file_invalid)\r\n seq_file.close()\r\n\r\n fd, self.sample_mapping_fp = mkstemp(prefix=\"sample_mapping_\",\r\n suffix=\".txt\")\r\n close(fd)\r\n map_file = open(self.sample_mapping_fp, \"w\")\r\n map_file.write(sample_mapping_file)\r\n map_file.close()\r\n\r\n fd, self.sample_tree_3tips_fp = mkstemp(\r\n prefix=\"sample_tree3tips_\",\r\n suffix=\".tre\")\r\n close(fd)\r\n tree_file = open(self.sample_tree_3tips_fp, \"w\")\r\n tree_file.write(sample_tree_file_3tips)\r\n tree_file.close()\r\n\r\n fd, self.sample_tree_5tips_fp = mkstemp(\r\n prefix=\"sample_tree3tips_\",\r\n suffix=\".tre\")\r\n close(fd)\r\n tree_file = open(self.sample_tree_5tips_fp, \"w\")\r\n tree_file.write(sample_tree_file_5tips)\r\n tree_file.close()\r\n\r\n fd, self.sample_mapping_file_errors_fp =\\\r\n mkstemp(prefix=\"error_mapping_\", suffix=\".txt\")\r\n close(fd)\r\n map_file = open(self.sample_mapping_file_errors_fp, \"w\")\r\n map_file.write(sample_mapping_file_errors)\r\n map_file.close()\r\n\r\n self._files_to_remove = [self.sample_fasta_fp,\r\n self.sample_fasta_invalid_fp, self.sample_mapping_fp,\r\n self.sample_tree_3tips_fp, self.sample_tree_5tips_fp,\r\n self.sample_mapping_file_errors_fp]\r\n\r\n self.output_dir =\\\r\n mkdtemp(prefix=\"validate_demultiplexed_fasta_\",\r\n suffix=\"/\")", "def make_fastq_multi(in_fasta, quals, out_fp,\r\n label_transform=split_lib_transform):\r\n mkdir(out_fp)\r\n seen_libs = defaultdict(list)\r\n for rec, label in iter_fastq(in_fasta, quals, label_transform):\r\n lib_id, seq_id = label.rsplit('_', 1)\r\n seen_libs[lib_id].append(rec)\r\n for lib, recs in seen_libs.items():\r\n if lib is None: # skip the seqs we couldn't assign to a library\r\n continue\r\n outfile = open(out_fp + '/' + lib + '.fastq', 'w')\r\n outfile.write('\\n'.join(recs))\r\n outfile.close()", "def parse_multifasta_file(file, number_of_fastas):\n\n with open(file) as file:\n for i in range(number_of_fastas):\n fasts_seq = ''\n fasta_name = file.readline().strip()[1:]\n end_of_file = False\n end_of_seq = False\n while not end_of_seq and not end_of_file:\n x = file.tell()\n seq = file.readline()\n if not seq:\n end_of_file = True\n elif '>' not in seq:\n fasts_seq = fasts_seq + seq\n else:\n file.seek(x)\n end_of_seq = True\n fasts_seq = re.sub(r'\\n', '', fasts_seq)\n yield fasta_name, fasts_seq", "def fasta_writer(file_obj, header, seq, wrap=60):\n file_obj.write(header + '\\n')\n for i in range(0, len(seq), wrap):\n file_obj.write(seq[i: i + wrap] + '\\n')", "def writeTmpFastq(self, fw_reads_path, rev_reads_path):\n try:\n fq1 = open(fw_reads_path, \"w+\")\n fq1.write(reads1_string)\n fq1.close()\n fq2 = open(rev_reads_path, \"w+\")\n fq2.write(reads2_string)\n fq2.close()\n except OSError:\n pass", "def 
writeTmpFastq(self, fw_reads_path, rev_reads_path):\n try:\n fq1 = open(fw_reads_path, \"w+\")\n fq1.write(reads1_string)\n fq1.close()\n fq2 = open(rev_reads_path, \"w+\")\n fq2.write(reads2_string)\n fq2.close()\n except OSError:\n pass", "def WriteNewFile(head_list, atom_list, tail_list):\n file = open(\"output.txt\", 'w')\n output_head = ''.join(map(str,head_list))\n output_atom = ''.join(map(str,atom_list))\n output_tail = ''.join(map(str,tail_list))\n output = ((output_head)+(output_atom)+(output_tail))\n file.write(output)", "def prepare_fasta_for_blastclust(in_fasta, out_fasta):\n with open(out_fasta, 'w') as out:\n i = 0\n for seq_record in SeqIO.parse(in_fasta, \"fasta\"):\n if len(seq_record.seq) > 5 and 'XXXXX' not in seq_record.seq and 'UUUUU' not in seq_record.seq:\n out.write(\n '>' + seq_record.id.split('|')[0] + '_' + str(i) + '\\n' + str(seq_record.seq) + '\\n')\n i += 1", "def split_fasta(infile, seqs_per_file, outfile_prefix, working_dir=''):\r\n if seqs_per_file <= 0:\r\n raise ValueError(\"seqs_per_file must be > 0!\")\r\n\r\n seq_counter = 0\r\n out_files = []\r\n if working_dir and not working_dir.endswith('/'):\r\n working_dir += '/'\r\n create_dir(working_dir)\r\n\r\n for seq_id, seq in parse_fasta(infile):\r\n if seq_counter == 0:\r\n current_out_fp = '%s%s.%d.fasta' \\\r\n % (working_dir, outfile_prefix, len(out_files))\r\n current_out_file = open(current_out_fp, 'w')\r\n out_files.append(current_out_fp)\r\n current_out_file.write('>%s\\n%s\\n' % (seq_id, seq))\r\n seq_counter += 1\r\n\r\n if seq_counter == seqs_per_file:\r\n current_out_file.close()\r\n seq_counter = 0\r\n\r\n if not current_out_file.closed:\r\n current_out_file.close()\r\n\r\n return out_files", "def write_fasta(self):\n patched_otus = get_patched_otus(\n self.db,\n self.settings,\n self.params[\"manifest\"]\n )\n\n sequence_otu_map = dict()\n\n sequences = get_sequences_from_patched_otus(\n patched_otus,\n self.params[\"data_type\"],\n sequence_otu_map\n )\n\n fasta_path = os.path.join(self.params[\"index_path\"], \"ref.fa\")\n\n write_sequences_to_file(fasta_path, sequences)\n\n index_id = self.params[\"index_id\"]\n\n self.db.indexes.update_one({\"_id\": index_id}, {\n \"$set\": {\n \"sequence_otu_map\": sequence_otu_map\n }\n })\n\n self.dispatch(\"indexes\", \"update\", [index_id])", "def test_assign_seqs_two_fastas_quals(self):\r\n\r\n # Handles single fasta and single qual\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors,\r\n self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors,\r\n self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG 
bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 2, 'AACTCGTCGATG,s1': 2,\r\n 'AGCAGCACTTGT,s2': 2}\r\n expected_bc_freqs = {'AACTCGTCGATG': 2, 'AGCAGCACTTGT': 2,\r\n 'ACCGCAGAGTCA': 2}\r\n expected_seq_counts = 6\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def write_degapped_fasta_to_file(seqs, tmp_dir=\"/tmp/\"):\r\n fd, tmp_filename = mkstemp(dir=tmp_dir, prefix=\"degapped_\",\r\n suffix=\".fasta\")\r\n close(fd)\r\n\r\n with open(tmp_filename, 'w') as fh:\r\n for seq in degap_fasta_aln(seqs):\r\n fh.write(seq.to_fasta())\r\n\r\n return tmp_filename", "def create_FASTA_file(proteins):\n frecords = []\n count = 0\n for prot in proteins:\n if proteins[prot][1] != \"\":\n prot_id = prot\n sequence = str(proteins[prot][1])\n frec = SeqRecord(Seq(sequence, IUPAC.protein), id=prot_id, description=\"\")\n frecords.append(frec)\n else:\n count += 1\n output_handle = open(\"BLAST/mouse.fasta\", \"w\")\n SeqIO.write(frecords, output_handle, \"fasta\")\n output_handle.close()\n print \"count = \", count", "def process_files_and_demultiplex_sequences(mapping_file,\r\n fasta_files,\r\n qual_files,\r\n output_dir=\"./\",\r\n keep_barcode=False,\r\n barcode_type='golay_12',\r\n max_bc_errors=0.5,\r\n start_index=1,\r\n write_unassigned_reads=False,\r\n disable_bc_correction=False,\r\n added_demultiplex_field=None,\r\n save_barcode_frequencies=False):\r\n\r\n file_data = {}\r\n\r\n fasta_files = [get_infile(fasta_f) for fasta_f in fasta_files]\r\n qual_files = [get_infile(qual_f) for qual_f in qual_files]\r\n\r\n file_data['fasta_files'] = fasta_files\r\n file_data['qual_files'] = qual_files\r\n file_data['mapping_file'] = open(mapping_file, \"U\")\r\n\r\n file_data['demultiplexed_seqs_f'] = open(join(output_dir,\r\n \"demultiplexed_seqs.fna.incomplete\"), \"w\")\r\n if qual_files:\r\n file_data['demultiplexed_qual_f'] = open(join(output_dir,\r\n 
\"demultiplexed_seqs.qual.incomplete\"), \"w\")\r\n if write_unassigned_reads:\r\n file_data['unassigned_seqs_f'] = open(join(output_dir,\r\n \"unassigned_seqs.fna.incomplete\"), \"w\")\r\n if qual_files:\r\n file_data['unassigned_qual_f'] =\\\r\n open(join(output_dir, \"unassigned_seqs.qual.incomplete\"), \"w\")\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n demultiplex_sequences(file_data, keep_barcode, barcode_type,\r\n max_bc_errors, start_index, write_unassigned_reads,\r\n disable_bc_correction, added_demultiplex_field)\r\n\r\n final_log_data = process_log_data(log_data, seq_counts, mapping_file,\r\n fasta_files, qual_files, corrected_bc_count, keep_barcode, barcode_type,\r\n max_bc_errors, start_index, write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n log_file = open(join(output_dir, \"demultiplex_fasta.log\"), \"w\")\r\n log_file.write(\"\\n\".join(final_log_data))\r\n\r\n if save_barcode_frequencies:\r\n bcs_sorted_list = process_bc_freqs(bc_freqs)\r\n bc_freqs_f = open(join(output_dir, \"barcode_freqs.txt\"), \"w\")\r\n bc_freqs_f.write(\"Barcode frequencies\\n\")\r\n bc_freqs_f.write(\"\\n\".join(bcs_sorted_list))\r\n\r\n # Rename .incomplete files to .fna/.qual files\r\n\r\n rename(file_data['demultiplexed_seqs_f'].name, join(output_dir,\r\n \"demultiplexed_seqs.fna\"))\r\n if qual_files:\r\n rename(file_data['demultiplexed_qual_f'].name, join(output_dir,\r\n \"demultiplexed_seqs.qual\"))\r\n if write_unassigned_reads:\r\n rename(file_data['unassigned_seqs_f'].name, join(output_dir,\r\n \"unassigned_seqs.fna\"))\r\n if qual_files:\r\n rename(file_data['unassigned_qual_f'].name, join(output_dir,\r\n \"unassigned_seqs.qual\"))", "def make_fastq_single(in_fasta, quals, out_fp,\r\n label_transform=split_lib_transform):\r\n outfile = open(out_fp, 'w')\r\n for rec, seq_id in iter_fastq(in_fasta, quals, label_transform):\r\n outfile.write(rec + '\\n')\r\n outfile.close()", "def process_reads_joined(args):\n\n watson_joined_r1 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_joined_r1', dir=args.tmpdir,\n delete=False)\n watson_joined_r2 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_joined_r2', dir=args.tmpdir,\n delete=False)\n crick_joined_r1 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='crick_joined_r1', dir=args.tmpdir,\n delete=False)\n crick_joined_r2 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='crick_joined_r2', dir=args.tmpdir,\n delete=False)\n args.watson_joined_r1 = watson_joined_r1.name\n args.watson_joined_r2 = watson_joined_r2.name\n args.crick_joined_r1 = crick_joined_r1.name\n args.crick_joined_r2 = crick_joined_r2.name\n\n print('Started processing joined reads')\n if args.reads_R1.endswith('.gz'):\n r1_handle = gzip.open(args.reads_R1, 'rt')\n r2_handle = gzip.open(args.reads_R2, 'rt')\n else:\n r1_handle = open(args.reads_R1, 'rt')\n r2_handle = open(args.reads_R2, 'rt')\n #make 4 file handles for forward and reverse watson and crick\n watson_r1_handle = open(args.watson_joined_r1, 'w')\n watson_r2_handle = open(args.watson_joined_r2, 'w')\n crick_r1_handle = open(args.crick_joined_r1, 'w')\n crick_r2_handle = open(args.crick_joined_r2, 'w')\n j = 0\n while True:\n read_r1 = []\n read_r2 = []\n for i in range(4):\n try:\n read_r1.append(next(r1_handle))\n read_r2.append(next(r2_handle))\n except StopIteration:\n break\n j += 1\n try:\n if int(args.sequences) == j:\n break\n except TypeError:\n pass\n if not j % 1000000:\n print('Processed %s reads' % (j))\n if 
not read_r1:\n break\n if 'watson' in read_r1[0].lower():\n convert_r1 = read_r1[1].upper().replace('C', 'T')\n convert_r2 = read_r2[1].upper().replace('G', 'A')\n c_pos = [str(n) for n, i in enumerate(read_r1[1]) if i.upper() == 'C']\n g_pos = [str(n) for n, i in enumerate(read_r2[1].rstrip('\\n')[::-1]) if i.upper() == 'G']\n header = '@%s' % (read_r1[0][1:-1].replace(' ', '|').replace('\\t', '|'))\n header += '|%s\\n' % (','.join(c_pos) + '|' + ','.join(g_pos))\n watson_r1_handle.write(header + convert_r1 + '+\\n' + read_r1[3])\n watson_r2_handle.write(header + convert_r2 + '+\\n' + read_r2[3])\n else:\n convert_r1 = read_r1[1].upper().replace('G', 'A')\n convert_r2 = read_r2[1].upper().replace('C', 'T')\n g_pos = [str(n) for n, i in enumerate(read_r1[1]) if i.upper() == 'G']\n c_pos = [str(n) for n, i in enumerate(read_r2[1].rstrip('\\n')[::-1]) if i.upper() == 'C']\n header = '@%s' % (read_r1[0][1:-1].replace(' ', '|').replace('\\t', '|'))\n header += '|%s\\n' % (','.join(g_pos) + '|' + ','.join(c_pos))\n crick_r1_handle.write(header + convert_r1 + '+\\n' + read_r1[3])\n crick_r2_handle.write(header + convert_r2 + '+\\n' + read_r2[3])\n crick_r1_handle.close()\n crick_r2_handle.close()\n watson_r1_handle.close()\n watson_r2_handle.close()\n return args", "def find_in_fasta(fileFasta, fileReport, listOfFile, INFO, PROTEIN_FUNCTION):\n\n\tlist_handle=[open(my_file,\"w\") for my_file in listOfFile]\n\n\twanted, name_genes, keys_genes = extract_protein(fileReport, INFO, PROTEIN_FUNCTION)\n\tseqiter = SeqIO.parse(open(fileFasta), 'fasta')\n\n\tprint \"\\n#################\"\n\tprint \"# Writing ...\"\n\tprint \"#################\\n\"\n\n\tprogression=1\n\tseq_wanted = len(wanted)\n\n\tfor seq in seqiter :\n\t if seq.id in wanted:\n\t\t\tsys.stdout.write(\"{:.2f}% : {}/{} sequences wanted found\\r\".format(progression/float(seq_wanted)*100, progression,seq_wanted))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tindex = wanted.index(seq.id)\n\t\t\tseq.description = ''\n\t\t\tseq.name = name_genes[index]\n\t\t\tseq.id = seq.name\n\n\t\t\tif keys_genes[index] in PROTEIN_FUNCTION :\n\t\t\t\twriting_file = re.search('[a-zA-Z0-9/_]+'+PROTEIN_FUNCTION[keys_genes[index]]+'\\.fasta', \"\\t\".join(listOfFile)).group(0)\n\n\t\t\t\tSeqIO.write(seq, list_handle[listOfFile.index(writing_file)], \"fasta\")\n\t\t\telse :\n\t\t\t\tsys.exit(\"ERROR:: Function not known : \"+keys_genes[index])\n\n\tprint\n\tprint \"Done!\"\n\n\t#Close all file\n\tfor open_file in list_handle:\n\t\topen_file.close()\n\n\tprint \"\\n#################\"\n\tprint \"# File wrote\"\n\tprint \"#################\\n\"", "def test_multiple_output_files(self):\r\n convert_fastq(self.fasta_file_path, self.qual_file_path,\r\n multiple_output_files=True,\r\n output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_ids = [('PC.634', expected_fastq_634_default),\r\n ('PC.354', expected_fastq_354_default),\r\n ('PC.481', expected_fastq_481_default)]\r\n for sample_id, expected_output in sample_ids:\r\n actual_output_file_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fastq',\r\n self.output_dir)\r\n\r\n actual_output_file = open(actual_output_file_path)\r\n actual_output = actual_output_file.read()\r\n actual_output_file.close()\r\n self._files_to_remove.append(actual_output_file_path)\r\n\r\n self.assertEquals(actual_output, expected_output)", "def test_multiple_output_files(self):\r\n convert_fastaqual(self.fasta_file_path,\r\n multiple_output_files=True,\r\n 
output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_id_s = [('PC.634', expected_fasta_634_default,\r\n expected_qual_634_default),\r\n ('PC.354', expected_fasta_354_default,\r\n expected_qual_354_default),\r\n ('PC.481', expected_fasta_481_default,\r\n expected_qual_481_default)]\r\n for sample_id, expected_fasta, expected_qual in sample_id_s:\r\n actual_output_fasta_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fna',\r\n self.output_dir)\r\n\r\n actual_output_qual_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.qual',\r\n self.output_dir)\r\n\r\n actual_output_fasta = open(actual_output_fasta_path)\r\n actual_output_qual = open(actual_output_qual_path)\r\n actual_fasta = actual_output_fasta.read()\r\n actual_output_fasta.close()\r\n actual_qual = actual_output_qual.read()\r\n actual_output_qual.close()\r\n self._files_to_remove.append(actual_output_fasta_path)\r\n self._files_to_remove.append(actual_output_qual_path)\r\n\r\n self.assertEquals(actual_fasta, expected_fasta)\r\n self.assertEquals(actual_qual, expected_qual)", "def tmp_all_reads_fasta(self):\n return op.join(self.out_dir, \"all_reads.fasta.tmp\")", "def write_fasta(alignment, dest):\n file_obj = None\n if isinstance(dest, str):\n file_obj = open(dest, \"w\")\n else:\n file_obj = dest\n for name, seq in list(alignment.items()):\n file_obj.write('>%s\\n%s\\n' % (name, seq) )\n if isinstance(dest, str):\n file_obj.close()", "def __write_dupe_file(self, filename):\n sortedList = sorted(self.dupeList, key=lambda file: file[0])\n with open(filename, mode='w') as outfile:\n for size, md5, filename, ino in sortedList:\n outfile.write(\"%s %s %s %s\\n\" % (size, md5, ino, filename))", "def test_call_write_to_file(self):\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath,\r\n result_path=self.result_filepath)\r\n with open(self.result_filepath) as f:\r\n actual = SequenceCollection.from_fasta_records(parse_fasta(f), DNA)\r\n expected = SequenceCollection.from_fasta_records(\r\n parse_fasta(rep_seqs_reference_result_file_exp.split('\\n')), DNA)\r\n # we don't care about order in the results\r\n self.assertEqual(set(actual), set(expected))", "def fix_fasta(database_names):\n for file in database_names:\n file_mod = file.replace(\".fasta\", \"_mod.fasta\")\n with open(file, 'r') as f:\n lines = f.readlines()\n new_lines = []\n for line in lines:\n if '|' in line and \">\" not in line:\n # we replace spaces in header line with \"__\"\n # so I can manipulate that later as biopython doesn't\n # like \"__\"\n new_line = \">\"+line.replace(\" \", \"__\")\n new_lines.append(new_line)\n else:\n new_lines.append(line)\n with open(file_mod, 'w') as f:\n for line in new_lines:\n f.write(line)", "def _separate_amplicons( file_list, reference_fasta, locus):\n subread_file, other_files = _separate_file_list( file_list, locus )\n alignment = _align_subreads( subread_file, reference_fasta, locus )\n locations = _parse_alignment( alignment )\n os.remove( alignment )\n medians = _calculate_medians( locations )\n centroids = _identify_centroids( locations, medians )\n assignments = _assign_reads( medians, centroids )\n new_subread_files = _write_assigned_reads( subread_file, assignments )\n return new_subread_files + other_files", "def write_one(paths_out, solutii, current_fis, note=\"\"):\n f = open(f\"{paths_out[current_fis]}\", \"a\")\n 
f.write(note)\n for s in solutii:\n f.write(s)", "def write_sequences_to_file(path: str, sequences: typing.Iterable):\n with open(path, \"w\") as handle:\n for sequence in sequences:\n sequence_id = sequence[\"_id\"]\n sequence_data = sequence[\"sequence\"]\n\n line = f\">{sequence_id}\\n{sequence_data}\\n\"\n handle.write(line)", "def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)", "def create_RCSB_fastas(file):\n folder_path = create_folder(file)\n filepath = RAWDATA_PATH + file\n with open(filepath, 'r') as raw_file:\n raw_data = raw_file.read()\n chains = raw_data.split('>')\n cache = []\n for chain in chains[1:]:\n head = chain[:4] # for SCOP\n chain_number = chain[5]\n prefix_pos = 27 # for SCOP\n prefix = chain[:prefix_pos]\n sequence = chain[prefix_pos:]\n sequence = sequence.replace('\\n', '')\n assert prefix[6:] == '|PDBID|CHAIN|SEQUENCE', 'Unknown prefix'\n if chain_number < 'A' or chain_number > 'Z': # invalid chain\n continue\n elif sequence in cache: # same chain\n continue\n if not cache: # new protein\n cache = [head, sequence]\n elif head != cache[0]: # new protein\n protein_sequence = ''\n for cached_sequence in cache[1:]:\n protein_sequence += cached_sequence\n if len(protein_sequence) > 300:\n new_fasta = open('{0}\\{1}.txt'.format(folder_path, head), 'w')\n new_fasta.write('>' + chain[:prefix_pos] + '\\n')\n new_fasta.write(protein_sequence)\n new_fasta.close()\n cache = [head, sequence]\n cache.append(sequence)\n new_fasta = open('{0}\\{1}.txt'.format(folder_path, head), 'w')\n new_fasta.write('>' + chain[:prefix_pos] + '\\n')\n for cached_sequence in cache[1:]:\n new_fasta.write(cached_sequence)\n new_fasta.close()", "def inBoth(from_files):\n t_nof1 = []\n f_nof1 = []\n array_of_times = []\n for file in from_files:\n item = file.replace('_COMPLETE', '')\n if item in to_files:\n to = os.path.join('/ToNof1/archive', item)\n from_nof1 = os.path.join('/FromNof1', file)\n t_nof1.append(to)\n f_nof1.append(from_nof1)\n\n\n\n with open(\"TAT_From_Nof1.tsv\", 'w') as f:\n i = 0\n myHeader = \"Completed File\\tCompleted Time\\tSent File\\tSent Time\\tDelta\\n\"\n f.write(myHeader)\n while i < len(to_files):\n today = datetime.today()\n\n fName = os.path.basename(f_nof1[i])\n tName = os.path.basename(t_nof1[i])\n\n fTime = getDate(f_nof1[i])\n tTime = getDate(t_nof1[i])\n\n duration = (today - fTime)\n if duration.days < 90:\n delta = fTime - tTime\n seconds = (delta.total_seconds())\n minutes = seconds / 60.0\n hours = minutes / 60.0\n array_of_times.append(hours)\n delta = str(delta)\n fTime = str(fTime)\n tTime = str(tTime)\n myString = (fName + \"\\t\" + fTime + \"\\t\" + tName + \"\\t\" + tTime + \"\\t\" + delta + \"\\n\")\n f.write(myString)\n i = i + 1 # advance the counter (missing in the original, so the loop never terminated)", "def read_several_fasta(input_files):\n pb_seq = []\n pb_name = []\n for name in input_files:\n header, seq = read_fasta(name)\n pb_name += header\n pb_seq += seq\n return pb_name, pb_seq", "def test_fasta_ids(self):\r\n first = StringIO('>x\\nACT\\n>y\\nAAA')\r\n first_copy = StringIO('>x\\nACT\\n>y\\nAAA')\r\n second = StringIO('>a\\nGGG\\n>b\\nCCC')\r\n self.assertEqual(fasta_ids([first, second]), set(['x', 'y', 'a', 'b']))\r\n first.seek(0) # need to 
reset so we can read it again\r\n self.assertRaises(ValueError, fasta_ids, [first, first_copy])", "def CatFasta2(inFile,beginSeqIndex,endSeqIndex,fpout):#{{{\n cntSeq=0\n fpin = open(inFile, \"r\")\n buff = fpin.read(BLOCK_SIZE)\n brokenseq=\"\"; ##for the seq broken by BLOCK\n while buff:\n if cntSeq > endSeqIndex:\n break\n beg=0\n end=0\n while 1:\n if brokenseq:\n end=buff.find(\"\\n>\")\n if end >= 0:\n seq=brokenseq+buff[0:end]\n brokenseq=\"\"\n beg=end\n if cntSeq > beginSeqIndex and cntSeq <= endSeqIndex:\n fpout.write(\"%s\\n\"%seq)\n else:\n brokenseq += buff\n break\n\n beg=buff.find(\">\",beg)\n end=buff.find(\"\\n>\",beg+1)\n if beg >= 0:\n cntSeq+=1\n if end >=0:\n seq=buff[beg:end]\n beg=end\n if cntSeq > beginSeqIndex and cntSeq <= endSeqIndex:\n fpout.write(\"%s\\n\"%seq)\n else:\n brokenseq=buff[beg:]\n break\n else:\n brokenseq+=buff\n break\n buff = fpin.read(BLOCK_SIZE)\n if brokenseq:\n if cntSeq > beginSeqIndex and cntSeq <= endSeqIndex:\n fpout.write(\"%s\\n\"%brokenseq)\n\n fpin.close()\n return 0", "def stampa_single(single_read, bam_file):\n single_file = pysam.AlignmentFile(\"single_reads.sam\", \"w\",\n referencenames=bam_file.references,\n referencelengths=bam_file.lengths)\n for read in single_read:\n single_file.write(read)\n single_file.close()", "def test_compute_seqs_per_file(self):\r\n fd, temp_fasta_fp = mkstemp(prefix='QiimeScriptUtilTests',\r\n suffix='.fasta')\r\n close(fd)\r\n temp_fasta = ['>seq', 'AAACCCCAAATTGG'] * 25\r\n open(temp_fasta_fp, 'w').write('\\n'.join(temp_fasta))\r\n\r\n actual_25 = self.pw._compute_seqs_per_file(temp_fasta_fp, 25)\r\n actual_2 = self.pw._compute_seqs_per_file(temp_fasta_fp, 2)\r\n actual_10 = self.pw._compute_seqs_per_file(temp_fasta_fp, 10)\r\n actual_5 = self.pw._compute_seqs_per_file(temp_fasta_fp, 5)\r\n actual_40 = self.pw._compute_seqs_per_file(temp_fasta_fp, 40)\r\n\r\n remove_files([temp_fasta_fp])\r\n\r\n self.assertEqual(actual_25, 1)\r\n self.assertEqual(actual_2, 13)\r\n self.assertEqual(actual_10, 3)\r\n self.assertEqual(actual_5, 5)\r\n self.assertEqual(actual_40, 1)", "def extract_sequences(self, new_fasta, ids):\n assert isinstance(new_fasta, FASTA)\n new_fasta.create()\n for seq in self:\n if seq.id in ids: new_fasta.add_seq(seq)\n new_fasta.close()", "def read_write_protein_files(dir_path, heme_files):\n for i in number_of_files:\n# seqs = {}\n input_files = (dir_path + heme_files[i])\n f = open(input_files)\n count = 0\n# output_file = (dir_path + heme_files[i] + \".txt\")\n# g = open(output_file, \"x\")\n with open(input_files) as f:\n for line in f:\n if line.startswith('>'):\n name = line[1:].rstrip('\\n')\n count = count + 1\n seqs =[]\n else: # sequence, not header\n seqs[name] = seqs[name] + line\n# sequences += line[:-1]\n# output_file = open(\"out_\" + str(count) + \"_.txt\", \"a\")\n# output_file.write(str(len(sequences)))\n print(\"Number of proteins read:\" + count)\n f.close", "def test_force_fasta_output(tmp_path, cores):\n\n out_path = os.fspath(tmp_path / \"out.fasta\")\n with open(out_path, \"w\") as out_file:\n py = subprocess.Popen(\n [\n sys.executable,\n \"-m\",\n \"cutadapt\",\n \"--fasta\",\n \"-o\",\n \"-\",\n \"--cores\",\n str(cores),\n \"-a\",\n \"TTAGACATATCTCCGTCG\",\n datapath(\"small.fastq\"),\n ],\n stdout=out_file,\n )\n _ = py.communicate()\n assert_files_equal(cutpath(\"small.fasta\"), out_path)", "def write_seqs_fasta(out_fp_seqs_fasta: str, out_fp_seqs_qza: str,\n tsv_pd: pd.DataFrame, tsv_fp: str = '') -> str:\n with open(out_fp_seqs_fasta, 'w') as fas_o:\n 
for seq in tsv_pd.index:\n fas_o.write('>%s\\n%s\\n' % (seq.strip(), seq.strip()))\n cmd = '# Write features as fasta file:\\n'\n cmd += '# - Features from: %s\\n' % tsv_fp\n cmd += '# Snippet:\\n'\n cmd += '# ```:\\n'\n cmd += \"# with open(fasta_out, 'w') as o:\\n\"\n cmd += \"# for seq in tsv_pd.index:\\n\"\n cmd += \"# o.write('>%s\\\\n%s\\\\n' % (seq.strip(), seq.strip()))\\n\"\n cmd += '# ```:\\n'\n cmd += run_import(\n out_fp_seqs_fasta, out_fp_seqs_qza, 'FeatureData[Sequence]')\n return cmd", "def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)", "def concatenate_detected_verified(fasta_name, PATH_FASTA_DETECTED, PATH_FASTA_VERIFIED, INFO_folder, PATH_FASTA_CONCATENATED):\n\n\tprint \"\\n#################\"\n\tprint \"# Concatenated file\"\n\tprint \"#################\\n\"\n\n\t# NOTE Dictionary with the species/system id as key and a list as value\n\t# NOTE [\"the matching verified species/system id\", [list of the ATPase, IM ... sequences]]\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tlist_seq_verified = list(SeqIO.parse(verified_fasta, \"fasta\"))\n\t\tlist_id_verified = [seq.id for seq in list_seq_verified]\n\t\tlist_seq_verified = [seq.seq for seq in list_seq_verified]\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\t# IDEA Test at least once to check that after concatenation I am not left with systems that were not fully removed. Example: the ATPase of X is not the same as that of Y but the IM platform is; in that case X must also be removed for its ATPase\n\t\t# IDEA If the previous idea is true, then write the kept sequences to temporary files and \"cat\" each temporary file with its verified counterpart at the end.\n\n\t\t# NOTE There was a problem : the species + system name/id must not contain the _NumX_ tag because that Num refers to the number of duplicates of the protein (for example two gspE ATPases)\n\t\t# NOTE Some systems have sequences that are similar for all proteins except one, for example ESCO3 and NC_011993, which are identical for all except the ATPase (only 98%)\n\n\t\tfor seq in seq_parser :\n\n\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\tif id_seq in dict_remove :\n\t\t\t\tcontinue\n\n\t\t\telif seq.seq in list_seq_verified :\n\t\t\t\tindex=list_seq_verified.index(seq.seq)\n\n\t\t\t\tid_seq_verif = list_id_verified[index].split(\"_\")\n\t\t\t\tid_seq_verif = re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq_verif[:id_seq_verif.index(\"V\")]))\n\n\t\t\t\t# NOTE in the dictionary I put the verified system first, all sequences of the identical system second, and the sequence that caused it third\n\t\t\t\tdict_remove[id_seq]=[id_seq_verif,[], seq.id]\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\tprint \"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tos.system('cat \"{}\" > \"{}\"'.format(verified_fasta, concatenated_fasta))\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\twith open(concatenated_fasta, \"a\") as w_file :\n\t\t\tfor seq in seq_parser :\n\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, w_file, \"fasta\")\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\t# NOTE dict_remove is complete and all concatenated files are written\n\twrite_remove_concatenate(dict_remove, INFO_folder)\n\n\treturn", "def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n 
f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i", "def append_files(in_file1, character, in_file2, out_file):\n return_data = 0\n\n write_data = ''\n\n i = 0\n try:\n with open(in_file1, 'rt') as fi1:\n lines1 = fi1.readlines() # Read all the lines in fi1 as a tuple\n \n with open(in_file2, 'rt') as fi2:\n lines2 = fi2.readlines() # Read all the lines in fi2 as a tuple\n \n with open(out_file, 'at') as fo:\n fo.seek(0,2)\n while i < len(lines1):\n lines1[i] = lines1[i].rstrip('\\n')\n #lines1[i] = lines1[i].rstrip('\\r')\n fo.write(lines1[i] + character + lines2[i])\n i = i + 1\n print(write_data)\n except IOError:\n print(\"Error in reading/writing file.\")\n return_data = 2\n else:\n print('Operation completed successfully.')\n return_data = 1\n finally:\n fi2.close()\n fi1.close()\n fo.close()\n print(\"done\")\n return return_data", "def generateAlignment(seqs):\n \"\"\"Create temporary file for MUSCLE\"\"\"\n inFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n \n \n \"\"\"Creates an align object or pd.Series() with indexing to preserve order but does not apply padding\"\"\"\n align = padAlignment(seqs, applyPadding=False)\n \"\"\"Put alignments in the tempfiles\"\"\"\n align2fasta(seqs, inFn, applyPadding=False)\n\n muscleCommand = ['muscle', '-in', inFn, '-out', outFn]\n result = subprocess.call(muscleCommand)\n\n \"\"\"If MUSCLE was successful\"\"\"\n if not result:\n outAlign = fasta2align(outFn)\n else:\n print(\"Error in MUSCLE!\")\n raise Exception(\"MUSCLEError\")\n \"\"\"Remove the temporary files\"\"\"\n os.remove(inFn)\n os.remove(outFn)\n \n \"\"\"MUSCLE seqs need to be reordered using the original index\"\"\"\n outAlign = outAlign.loc[[str(i) for i in align.index]]\n \"\"\"Index was str() through FASTA files so reset index with original index\"\"\"\n outAlign.index = align.index\n \n \"\"\"Check that all seqs are being returned in the correct order\"\"\"\n badSeqs = 0\n if not len(seqs) == len(outAlign):\n print('Different number of output seqs!')\n badSeqs+=1\n\n for i, s1, s2 in zip(np.arange(len(seqs)), seqs, outAlign):\n if not s1.replace('-', '') == s2.replace('-', ''):\n print('%d: %s != %s' % (i, s1, s2))\n badSeqs+=1\n if badSeqs>0:\n raise Exception('Output seqs are different than input seqs! 
(%d)' % badSeqs)\n\n return outAlign", "def fastq_to_fasta(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_filtered.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fastq\"):\n ID = str(record.id)\n SEQ = str(record.seq)\n if ID in wanted_set:\n out.write(\">\" + ID + \"\\n\" + SEQ + \"\\n\")", "def catAlignments(alignA, alignB):\n\n \"\"\"Create temporary files for MUSCLE to work on the two alignments\"\"\"\n aFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n bFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n\n \n \"\"\"Make sure alignments have the same length and are Series objects\"\"\"\n alignA = padAlignment(alignA)\n alignB = padAlignment(alignB)\n\n \"\"\"Put alignments in the tempfiles\"\"\"\n align2fasta(alignA, aFn)\n align2fasta(alignB, bFn)\n\n muscleCommand = ['muscle', '-profile', '-in1', aFn, '-in2', bFn, '-out', outFn]\n result = subprocess.call(muscleCommand)\n\n \"\"\"If MUSCLE was successful\"\"\"\n if not result:\n outAlign = fasta2align(outFn)\n else:\n print(\"Error in MUSCLE!\")\n raise Exception(\"MUSCLEError\")\n \n \"\"\"\n except:\n pass\n os.remove(aFn)\n os.remove(bFn)\n os.remove(outFn)\n raise\n \"\"\"\n \"\"\"Remove the temporary files\"\"\"\n os.remove(aFn)\n os.remove(bFn)\n os.remove(outFn)\n\n return outAlign", "def test_split_fasta_diff_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 2, filename_prefix)\r\n\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(2)]\r\n # list of file paths is as expected\r\n self.assertEqual(actual, expected)\r\n # building seq collections from infile and the split files result in\r\n # equivalent seq collections\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def convert_single_to_multi(file_directory):\n all_files = os.listdir(file_directory)\n merged_file = ''\n for file_name in all_files:\n f_single = open(file_directory + '/' + file_name, \"r\")\n data = f_single.read() + '\\n'\n merged_file += data\n date = datetime.datetime.now()\n save_seq_file(('multi_fasta_' + str(date).split(' ')[0]), merged_file, file_directory)", "def export_fasta(self, metadata, analysistype, reportpath, cutoff, program):\n logging.info('Creating FASTA-formatted files of outputs')\n for sample in metadata:\n # Set the name of the FASTA output file\n sample[analysistype].fasta_output = os.path.join(reportpath, '{sn}_{prog}.fasta'.format(sn=sample.name,\n prog=analysistype))\n # Remove the file if it exists. 
Otherwise, if the samples are processed by the pipeline more than\n # once, the same results will be appended to the file\n try:\n os.remove(sample[analysistype].fasta_output)\n except FileNotFoundError:\n pass\n # Process the sample only if the script could find targets\n if sample[analysistype].blastresults != 'NA' and sample[analysistype].blastresults:\n # Open the FASTA output file in append mode\n with open(sample[analysistype].fasta_output, 'a+') as fasta_output:\n for target in sorted(sample[analysistype].targetnames):\n index = 0\n for hit in sample[analysistype].blastlist:\n if hit['subject_id'] == target:\n # Set the name and percent id to avoid writing out the dictionary[key] multiple times\n if float(hit['percent_match']) >= cutoff:\n # If the 'align' option was not specified, the .dnaseq attribute will be an empty\n # dictionary. Populate this attribute as required\n try:\n # The .dnaseq attribute will not exist for amino-acid based searches\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n # The .targetsequence attribute will be sufficient\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except (KeyError, IndexError):\n # Align the protein (and nucleotide) sequences to the reference\n sample = self.alignprotein(sample=sample,\n analysistype=analysistype,\n target=target,\n program=program,\n index=index,\n hit=hit)\n try:\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except IndexError:\n fasta = str()\n # Create the SeqRecord of the FASTA sequence\n if fasta:\n try:\n record = SeqRecord(fasta,\n id='{name}_{target}'\n .format(name=sample.name,\n target=target),\n description='')\n # Write the FASTA-formatted record to file\n fasta_output.write(record.format('fasta'))\n except (AttributeError, TypeError):\n pass\n index += 1\n # Return the updated metadata object\n return metadata", "def removeSeqsInFile(filename,listname):\n from Bio import SeqIO\n mylist = open(listname).read().split()\n myfas = list(SeqIO.parse(filename,\"fasta\"))\n fo = open(filename+\"_out\",\"w\")\n for ele in myfas:\n if ele.id not in mylist:\n fo.write(\">\"+ele.description+\"\\n\"+str(ele.seq)+\"\\n\")\n fo.close()\n return None", "def writeMultipleFiles(self, filePaths, ss): \n \n for i,filePath in enumerate(filePaths): \n self.writeSingleFileLines(filePath, [ss[i]])", "def unprepare_fasta_after_blastclust(in_fasta):\n tmp_file_name = in_fasta + TMP\n\n with open(tmp_file_name, 'w') as tmp_file:\n for seq_record in SeqIO.parse(in_fasta, \"fasta\"):\n tmp_file.write('>' + seq_record.id.split('_')[0] + '\\n' + str(seq_record.seq) + '\\n')\n\n os.remove(in_fasta)\n os.rename(tmp_file_name, in_fasta)", "def sequential_dump(list_of_whats, where, prefix, **args):\n # @todo - pad the number in filename\n for i, what in enumerate(list_of_whats,1):\n here = os.path.join(where,\"{}_{}.p\".format(prefix,i))\n pickle.dump(what,open(here,\"wb\"),**args)", "def Save_Fastas2(UniprotIDs):\r\n file=open(\"../Data/Negative_cases/negative_cases.fasta\",\"w\")\r\n for ID in UniprotIDs:\r\n data=urllib.request.urlopen(\"http://www.uniprot.org/uniprot/%s.fasta\" %ID)\r\n f=data.readlines()\r\n for lines in f:\r\n file.write(str(lines))\r\n #help(data)\r\n file.close()", "def save_to_file(filename: str, sequence: List[Sample]):\n\n with open(get_path() + \"/sequence/\" + filename, \"ab+\") as file:\n for sample in sequence:\n pickle.dump(sample, file, 
pickle.HIGHEST_PROTOCOL)", "def write_manifests( file_lists, target_dir, output_dir ):\n for i, lst in enumerate( file_lists ):\n with open( os.path.join( output_dir, \"manifest-{}.txt\".format( i ) ), \"w\" ) as fout:\n for r in lst:\n fout.write( insert_rsync_marker( r, target_dir ) + \"\\n\" )", "def convertFastqToFasta(inputFastq, outputFasta):\n out = open(outputFasta, \"w\")\n for (titleStr, seqStr, qualityStr) in FastqIterator(inputFastq):\n out.write(\">%s\\n%s\\n\" % (titleStr, seqStr))", "def concatenate_fastq(path, isfastq, sample_name):\n \n r1 = []\n r2 = []\n filenames = get_filesnames_in_dir(path)\n \n for i in filenames:\n if \"fake_genome\" in i:\n continue\n elif \"R1\" in i:\n r1.append(i)\n elif \"R2\" in i:\n r2.append(i)\n if isfastq:\n nameR1 = sample_name + \"-R1.fastq\"\n nameR2 = sample_name + \"-R2.fastq\"\n else:\n nameR1 = sample_name + \"-R1.fasta\"\n nameR2 = sample_name + \"-R2.fasta\"\n\n #concatinate R1\n with open(path + nameR1, 'w') as outfile:\n for fname in sorted(r1):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n #concatinate R2\n with open(path + nameR2, 'w') as outfile:\n for fname in sorted(r2):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n \n for i in r1 + r2:\n os.remove(path + i)", "def convert_multi_to_single(file_directory):\n all_files = os.listdir(file_directory)\n for file_name in all_files:\n f_multi = open(file_directory + '/' + file_name, \"r\")\n for seq_record in SeqIO.parse(f_multi, \"fasta\"):\n seq_id = seq_record.id\n sequence = str(seq_record.seq)\n save_seq_file(seq_id, sequence, file_directory)", "def join_map_and_trim(mapped, trimmed):\n out_file = \"%sfinal.fastq\" % os.path.commonprefix([mapped, trimmed])\n if not os.path.exists(out_file):\n with open(out_file, \"w\") as out_handle:\n for fname in [mapped, trimmed]:\n with open(fname) as in_handle:\n for line in in_handle:\n out_handle.write(line)\n return out_file", "def isolate_consensus(groups, fasta):\n with open(groups,\"r\") as f:\n file = f.readlines()\n\n sequences_in_out = open(\"sequences_in_out.table\",\"w\")\n consensus_sequences = open(\"consensus_sequences.fasta\",\"w\")\n\n for consensus_num, line in enumerate(file):\n group_sequences = line.rsplit()\n open(\"temp_sequences\",\"w\").close()\n # Write group of sequences to temporary file\n for group in group_sequences:\n sequences_in_out.write(\"{0}\\t\".format(group))\n with open(\"temp_sequences\",'a') as output:\n output.write(\">{0}\\n{1}\\n\".format(group, fasta[group]))\n # Run MAFFT alignment\n sp.call(['/apps/mafft/7.127/bin/mafft', '--adjustdirection', \n '--clustalout', '--preservecase', 'temp_sequences'],\n stdout=open('temp_alignment.txt','w'), \n stderr=open('temp_mafft_log','w') )\n # cat MAFFT log files\n sp.call(['cat','temp_mafft_log'], \n stdout = open(\"all_alignments.log\",'a+'))\n # cat MAFFT alignment files\n sp.call(['cat','temp_alignment.txt'], \n stdout = open(\"all_alignments.faa\",'a+'))\n\n #output group_sequences and the consensus generated in a file\n sequences_in_out.write(\"||\\t>Consensus_{0}\\n\".format(consensus_num + 1))\n align=Bio.AlignIO.read(\"temp_alignment.txt\",\"clustal\")\n summary_align = AlignInfo.SummaryInfo(align)\n consensus = summary_align.dumb_consensus(threshold=0.51, ambiguous='N')\n consensus_sequences.write(\">Consensus_{0}\\n{1}\\n\".format(\n consensus_num + 1,consensus))\n sequences_in_out.close()\n temp_files = glob(\"*temp*\")\n sp.call(['rm'] + 
temp_files)", "def print_rep_seqs(mapping, seqs, out_fp):\r\n out_fh = open(out_fp + \"/prefix_dereplicated.fasta\", \"w\")\r\n for s in (get_representatives(mapping, seqs.iteritems())):\r\n out_fh.write(s.to_fasta())\r\n out_fh.close()" ]
[ "0.7115534", "0.6977876", "0.6929793", "0.66960967", "0.66449195", "0.6635358", "0.6599218", "0.6559618", "0.6408864", "0.6341006", "0.6326734", "0.6306929", "0.629032", "0.62058866", "0.6190285", "0.612359", "0.611101", "0.6104494", "0.60735565", "0.6060679", "0.60134447", "0.6001778", "0.59668463", "0.59595895", "0.5946026", "0.59359753", "0.5927326", "0.5924785", "0.5923731", "0.5922217", "0.5900689", "0.5893501", "0.5875647", "0.58740497", "0.58451104", "0.58375794", "0.58054227", "0.5761539", "0.57549435", "0.5754588", "0.57536", "0.5749472", "0.5748666", "0.5743233", "0.5743233", "0.5741694", "0.5728542", "0.5719292", "0.5716567", "0.57081115", "0.57038873", "0.5676375", "0.566787", "0.5659049", "0.5656849", "0.56507", "0.56359375", "0.5635494", "0.56303483", "0.55978376", "0.55805296", "0.55750895", "0.5571494", "0.55667824", "0.5561713", "0.55371994", "0.55290395", "0.5522863", "0.5520308", "0.5513721", "0.5504238", "0.55010873", "0.54983133", "0.5493188", "0.5490445", "0.5490372", "0.5476364", "0.54738384", "0.546407", "0.54636437", "0.54614586", "0.54538596", "0.54450333", "0.5442205", "0.54407567", "0.54378015", "0.543755", "0.5430674", "0.54221064", "0.541213", "0.54082155", "0.54043174", "0.53998864", "0.53989285", "0.5397062", "0.5393472", "0.53918964", "0.53913987", "0.5390872", "0.5390491", "0.5387579" ]
0.0
-1
Run the application with the specified kwargs on data
def __call__(self,data=None, remove_tmp=True): input_handler = self.InputHandler suppress_stdout = self.SuppressStdout suppress_stderr = self.SuppressStderr if suppress_stdout: outfile = FilePath('/dev/null') else: outfile = self.getTmpFilename(self.TmpDir) if suppress_stderr: errfile = FilePath('/dev/null') else: errfile = FilePath(self.getTmpFilename(self.TmpDir)) if data is None: input_arg = '' else: input_arg = getattr(self,input_handler)(data) # Build up the command, consisting of a BaseCommand followed by # input and output (file) specifications command = self._command_delimiter.join(filter(None,\ [self.BaseCommand,str(input_arg),'>',str(outfile),'2>',\ str(errfile)])) if self.HaltExec: raise AssertionError, "Halted exec with command:\n" + command # The return value of system is a 16-bit number containing the signal # number that killed the process, and then the exit status. # We only want to keep the exit status so do a right bitwise shift to # get rid of the signal number byte tmp_dir = ''.join([self.WorkingDir, 'tmp']) mkdir(tmp_dir) exit_status = system(command) >> 8 rmdir(tmp_dir) # Determine if error should be raised due to exit status of # application if not self._accept_exit_status(exit_status): raise ApplicationError, \ 'Unacceptable application exit status: %s, command: %s'\ % (str(exit_status),command) # open the stdout and stderr if not being suppressed out = None if not suppress_stdout: out = open(outfile,"r") err = None if not suppress_stderr: err = open(errfile,"r") result = CommandLineAppResult(out,err,exit_status,\ result_paths=self._get_result_paths(data)) # Clean up the input file if one was created if remove_tmp: if self._input_filename: for f in self._input_filename: remove(f) self._input_filename = None return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, **kwargs):\n app = self.create_app()\n\n app.run(host=self.host, port=self.port, **kwargs)", "def run(self, **kwargs):", "def run(self, **kwargs):\n pass", "def run(self, *args, **kwargs):\n pass", "def run(self, args, **kwargs):\n raise NotImplementedError()", "def __call__(self, *args, **kwargs):\n with app.app_context(): # pragma: no cover\n return self.run(*args, **kwargs)", "def run(self, **kwargs: Any) -> None:\n raise NotImplementedError", "def run(self, *args, **kwargs) -> typing.Any:\n pass", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def _run_kwargs(cls, kwargs: Dict[str, Any]):\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)", "def main(self, **kwargs) -> None:\n ...", "def main(cls, *args, **kwargs):\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)", "def do_view_data(self, *args):\n with suppress(SystemExit):\n if str(*args).split(' ')[0] == '':\n command = self.cli.view_parser.parse_args(*args)\n else:\n command = self.cli.view_parser.parse_args(str(*args).split(' '))\n command.func(**vars(command))", "def run(self, *args, **kwargs):\n # @@@ async? (consider how args and kwargs should be serialized;\n # that may change things quite a bit)\n self.process((args, kwargs))", "def __call__(self, args, app: Application = None) -> None:\n raise NotImplementedError", "def kwargs(kwargs):\n run_kwargs(kwargs)", "def run(self, **kwargs) -> dict:\n self.input_specification.validate_kwargs(**kwargs)\n raw_results = self.run_interface(**kwargs)\n return self.extract_results(raw_results)", "def run(self):\n self.fn(*self.args, **self.kwargs)", "def _run(self, *args, **kwargs):\n raise NotImplementedError", "def run(self, args):\n pass", "def __call__(self, app: App, **kwargs: Any) -> None:\n ...", "def __call__(self, data, **kwargs):", "def run(self, args):\n\n return", "def run(self, data):\n\n if data and self.application:\n # Build tuples for embedding index\n if self.application.embeddings:\n data = [(x, element, None) for x, element in enumerate(data)]\n\n # Process workflow\n with st.spinner(\"Running workflow....\"):\n results = []\n for result in self.application.workflow(self.name, data):\n # Store result\n results.append(result)\n\n # Write result if this isn't an indexing workflow\n if not self.application.embeddings:\n st.write(result)\n\n # Store workflow results\n self.data = results", "def run(d):\n global DATA # global is ugly but quick. 
# pylint: disable=W0603\n DATA = d\n\n logging.info('Listening on http://localhost:8020')\n APP.run(port=8020, debug=True)", "def __call__(self, *args, **kwargs):\n return self.run(*args, **kwargs)", "def run_dataflow(self, *args, **kwargs):\n raise NotImplementedError", "def quick_run(self, *args):\n self.inputs(*args)\n self.run()", "def main(args=None):\n app()\n return 0", "def Run(self, *args, **kwargs):\n\t\tpayload = { \"Arg1\": self.href }\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\n\t\treturn self._execute('run', payload=payload, response_object=None)", "def apply(cls, args, run):\n pass", "def run(self):\n\n input_args = {}\n self._execute(input_args, self.args)", "def __call__( self, *args, **kw ):\n return self.run( *args, **kw )", "def __init__(self, application, data):\n self.application = application\n self.data = data", "def Run(self, args):\n pass", "def handle_execution(self, data, *args, **kwargs):\n return {}", "def handle_execution(self, data, *args, **kwargs):\n return {}", "def _run ( self ) :\n raise NotImplementedError ( \"AppBase: method _run() should be implemented in a subclass\" )", "def run(self, *args: Any, **kwargs: Any) -> int:\n raise NotImplementedError", "def __call__(self, args, kwargs):\n raise NotImplementedError", "def run_atoml_app():\n data = flask.request.json\n features, output = _get_output(data)\n return_dict = {'input': data, 'features': features, 'output': output}\n return_dict = flask.jsonify(**return_dict)\n\n return return_dict", "def run(self, *args, **kwargs):\n raise NotImplementedError('Tasks must define the run method.')", "def run(self, data):\n\t\t# no processing here\n\t\treturn data", "def main(**kwargs):\n\n wrap_predict(kwargs['input'],\n kwargs['output'],\n kwargs['pipeline'])", "def main(*args,**kwargs):\n args = args or [a for a in sys.argv if not a.startswith('--')]\n print('EventTestDS.main(%s)'%args)\n py = PyTango.Util(['EventTestDS',args[1]])\n py.add_TgClass(EventTestDSClass,EventTestDS,'EventTestDS')\n U = PyTango.Util.instance()\n U.server_init()\n if int(kwargs.get('run',True)):\n U.server_run()\n else:\n print('Server initialized, waiting for PyTango.Util.instance().server_run() to start processing events')\n return U", "def setup(self, app_args):\n raise NotImplementedError", "def run(self, *args, **kwargs):\n info_list = self._merge()\n self._run(info_list)", "def run(self, args: argparse.Namespace) -> None:\n pass", "def exec(self,**kwargs):\r\n pass", "def main(run_app, config_filename=None):\n\n if not config_filename:\n parser = ThrowingArgumentParser(description='Provide a RESTful API service from the order database.')\n parser.add_argument('config_file', help='JSON configuration file', type=argparse.FileType('r'))\n args = parser.parse_args()\n\n app = create_app(args.config_file.name)\n\n else:\n app = create_app(config_filename)\n\n if 'flask-debug' in app.config:\n do_debug = app.config['flask-debug']\n else:\n do_debug = False\n\n if run_app:\n if app.config['listening_ip'] and app.config['listening_port']:\n app.run(host=app.config['listening_ip'], port=app.config['listening_port'], debug=do_debug)\n else:\n # expect a nginx environment\n app.run(debug=do_debug)", "def _run(data):\n try:\n func, args, kwds = cPickle.loads(data)\n except Exception, e:\n raise deferred.PermanentTaskFailure(e)\n \n try:\n func(*args, **kwds)\n except TypeError:\n logging.debug(\"Deferred function arguments: %s %s\", args, kwds)\n raise", 
"def __call__(self, app: App, **kwargs: Any) -> bool:\n ...", "def main(args):\n app = Application()\n if args and args.markets:\n app.set_markets(args.markets)\n if args and args.symbols:\n app.set_symbols(args.symbols)\n app.print_message()\n\n if args and app.markets:\n file_path = './../'\n\n scrapper = scrapping.Scrapper(app.markets)\n scrapper.get_symbols(f\"{file_path}data/stocks.json\")\n\n if len(app.symbols) > 0:\n companies = {}\n for symbol in app.symbols:\n file_name = f\"{file_path}data/{symbol}_financials.json\"\n companies[symbol] =\\\n scrapper.get_fundamental_analysis(symbol,\n file_name)\n print(companies)\n analysis_companies = analysis.Analyze(companies, app.symbols)\n result = analysis_companies.calculate()\n print(result)\n\n logger.info(args)", "def call(self, **params):\n # NOTE - use __call__??\n # TODO - move exec_script here?\n # TODO - call should handle param defaults\n from datapane.runner.exec_script import run\n\n run(self, params)", "def start( *args, **kwargs ):", "def run(self,*args,**kwargs):\n print(\"[TEMPLATE ENGINE] 'run' function not implemented\")\n pass", "def execute(self, *args, **kwargs):", "def execute(self, *args, **kwargs):", "def call_launch_app_callback(self, app_name, **kwargs):\n raise NotImplementedError", "def main(self, params):\n pass", "def start(**kwargs):\n # Project\n\n CustomWSGI(\n app=\"stats.api.main:api\",\n options={\n \"worker_class\": \"uvicorn.workers.UvicornWorker\",\n \"preload\": True,\n \"keepalive\": 10,\n \"command\": shutil.which(\"gunicorn\"),\n \"bind\": \":\".join(\n (format_listen_address(params.listen_address), str(params.listen_port))\n ),\n \"workers\": workers,\n \"loglevel\": loglevel,\n \"accesslog\": \"-\",\n \"errorlog\": \"-\",\n # \"logconfig_dict\": {\"formatters\": {\"generic\": {\"format\": \"%(message)s\"}}},\n **kwargs,\n },\n ).run()", "def call(self, *args, **kwargs):", "def run(self, **kwargs):\n self.runKwargs = kwargs\n assert self.initialized, \"Pypet environment not initialized yet.\"\n self._t_start_exploration = datetime.datetime.now()\n self.env.run(self.evalFunction)\n self._t_end_exploration = datetime.datetime.now()", "def main(args=None):", "def main(args=None):", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def start_app():\n args = parse_args()\n data = MutualExclusionData(args)\n if args.method == 'pingpong':\n ping_pong.create_all(data)\n elif args.method == 'ricart_agrawala':\n ricart_agrawala.create_all(data)\n elif args.method == 'lamport':\n lamport.create_all(data)\n else:\n raise ValueError(\n 'Unsupported method. 
Please choose pingpong or ricart_agrawala')\n\n print 'Starting processes'\n try:\n for proc in data.processes:\n proc.start()\n for proc in data.processes:\n proc.join()\n finally:\n data.close()\n print 'Pipe closed'", "def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)", "def from_data(cls, app, configuration):", "def Run(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[List[str], None]\n payload = {\"Arg1\": self.href}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"run\", payload=payload, response_object=None)", "def Run(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[List[str], None]\n payload = {\"Arg1\": self.href}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"run\", payload=payload, response_object=None)", "def setup(self, **kwargs):\n if kwargs.get(\"attach\", False):\n log(\n logging.DEBUG,\n DataCategory.ONLY_PUBLIC_DATA,\n \"Creating instance of azureml.core.run.Run\",\n )\n # IMPORTANT: keep this import outside of top level\n # because this requires aml connection\n try:\n from azureml.core.run import Run\n\n self._recipe_azureml_run = Run.get_context(allow_offline=True)\n except:\n # if running in detonation chamber, this will fail due to lack of connectivity\n log(\n logging.CRITICAL,\n DataCategory.ONLY_PUBLIC_DATA,\n \"Obtaining Run.get_context() resulted in an exception: {}\".format(\n traceback.format_exc()\n ),\n )\n self._recipe_azureml_run = None\n\n elif \"run\" in kwargs:\n self._recipe_azureml_run = kwargs.get(\"run\")\n\n if \"output_file\" in kwargs:\n self._metrics_file_path = kwargs.get(\"output_file\")\n log(\n logging.INFO,\n DataCategory.ONLY_PUBLIC_DATA,\n \"Will write metrics in {}\".format(self._metrics_file_path),\n )", "def run(self, **options):\n try:\n application = App()\n application.listen(self.port, self.addr)\n tornado.ioloop.IOLoop.current().start()\n except Exception, e:\n self.stdout.write('run error. 
Detail error info as follows: %s' % e)", "def run(self):\n self.app.run()", "def run(self):\n self.app.run()", "def execute(self, *args, **kwargs):\n pass", "def main(passed_arguments):\n\n # use real data as default\n scripts_path = os.path.abspath(os.path.join(PYTHON_PATH, 'scripts'))\n meta_path = os.path.abspath(os.path.join(scripts_path, 'meta.json'))\n manifest_path = os.path.abspath(os.path.join(scripts_path, 'manifest.csv'))\n\n # Locally, we can optionally have sample data\n if passed_arguments.sample and passed_arguments.database != 'remote':\n meta_path = os.path.abspath(os.path.join(scripts_path,\n 'meta_sample.json'))\n manifest_path = os.path.abspath(\n os.path.join(scripts_path, 'manifest_sample.csv'))\n\n # for case of more than one database choice default to the option with\n # the lowest risk if database is updated\n if passed_arguments.database == 'docker':\n database_choice = 'docker_database'\n drop_tables = True\n\n elif passed_arguments.database == 'docker_local':\n database_choice = 'docker_with_local_python'\n drop_tables = True\n\n elif passed_arguments.database == 'remote':\n database_choice = 'remote_database'\n drop_tables = False #TODO this is a hacky way to avoid dropping tables because it's not working with RDS...\n\n # Only users with additional admin privileges can rebuild the\n # remote database\n if not passed_arguments.update_only:\n database_choice = 'remote_database_master'\n\n # TODO: do we want to default to local or docker?\n elif passed_arguments.database == 'local':\n database_choice = 'local_database'\n drop_tables = True\n\n # universal defaults\n keep_temp_files = True\n\n # Instantiate and run the loader\n loader = LoadData(database_choice=database_choice, meta_path=meta_path,\n manifest_path=manifest_path,\n keep_temp_files=keep_temp_files,\n drop_tables=drop_tables)\n\n if passed_arguments.update_only:\n loader.update_database(passed_arguments.update_only)\n else:\n loader.rebuild()\n\n\n\n #TODO add in failures report here e.g. _failed_table_count", "def execute(self, data, options):\n raise NotImplementedError()", "def run(self, *args, **kwargs):\n return self.func(self, *args, **kwargs)", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... 
and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def main(arguments):\n auth = (arguments['username'], arguments['token'])\n data_collector = DataCollector(arguments['repo name'],\n arguments['start date'],\n arguments['end date'], auth,\n arguments['all'], arguments['page'])\n data_collector.collect_signals()", "def run(_):\n pass", "def create_and_run():\n\n app = App()\n app.run()", "def render(data_dict, *args, **kwargs):", "def main(argv: Optional[List[str]] = None) -> None: # pragma: no cover, proxy\n if argv is None:\n argv = sys.argv[1:]\n\n app = reducto.Reducto()\n app.run(argv)", "def run(user_params, api_params):\n\n logger.info(\"api_params: \" + str(api_params))\n logger.info(\"user_params: \" + str(user_params[\"params\"]))\n # combine two dict api_params will overwrite user_params\n params = {**user_params[\"params\"], **api_params}\n logger.info(\"combined params: \" + str(params))\n\n if \"model\" in user_params and user_params[\"model\"]:\n data_in = [\n Data(dataname, source, params).data\n for dataname, source in user_params[\"model\"]\n ]\n else:\n data_in = None\n\n # str or str list : handler name is\n # one string of handler name\n # or list of handler name\n handler = han.ReportsHandler(handler=user_params[\"handler\"])\n data = handler.handle(data_in, params)\n filters = user_params[\"filters\"]\n\n return {\"data\": data, \"filters\": filters}", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main(args):\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = LightningTemplateModel(**vars(args))\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)", "def run_on_host(self, *args, **kwargs) -> Any:\n raise NotImplementedError", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def run(app, server, bind=\"localhost:8080\", **kwargs):\n\n # Compose application name\n if isinstance(app, str):\n appname = app\n if \":\" not in appname:\n raise ValueError(\"If specifying an app by name, give its full path!\")\n else:\n appname = app.__module__ + \":\" + app.__name__\n\n # Check server and bind\n assert isinstance(server, str), \"asgineer.run() server arg must be a string.\"\n assert isinstance(bind, str), \"asgineer.run() bind arg must be a string.\"\n assert \":\" in bind, \"asgineer.run() bind arg must be 'host:port'\"\n bind = bind.replace(\"localhost\", \"127.0.0.1\")\n\n # Select server function\n try:\n func = SERVERS[server.lower()]\n except KeyError:\n raise ValueError(f\"Invalid server specified: {server!r}\")\n\n # Delegate\n return func(appname, bind, **kwargs)", "def main(args):" ]
[ "0.6900019", "0.6881134", "0.68657255", "0.68310666", "0.6701029", "0.66893464", "0.65126204", "0.64587903", "0.64343816", "0.64343816", "0.6430925", "0.64042205", "0.6383089", "0.6359522", "0.63256764", "0.6306275", "0.62514395", "0.62414396", "0.61845434", "0.6160434", "0.6138743", "0.6111847", "0.60871905", "0.6080326", "0.60118914", "0.5931047", "0.5867854", "0.58137906", "0.57931674", "0.57691634", "0.57563955", "0.57299954", "0.57286954", "0.5723009", "0.57042414", "0.5680163", "0.5670324", "0.5660554", "0.5660554", "0.5657226", "0.56168056", "0.5609838", "0.55908483", "0.55644184", "0.5560377", "0.55576193", "0.5542036", "0.55166227", "0.5510427", "0.5496817", "0.5485827", "0.5482526", "0.5482438", "0.54767823", "0.54732263", "0.5456418", "0.5455926", "0.5445192", "0.53940815", "0.53940815", "0.53860176", "0.5385962", "0.5383177", "0.53800637", "0.5377423", "0.53665686", "0.53665686", "0.5354165", "0.5339741", "0.5338693", "0.5333905", "0.5331644", "0.5331644", "0.53306955", "0.5328714", "0.53279334", "0.53279334", "0.5318252", "0.53088206", "0.5307776", "0.52900755", "0.5286069", "0.52764213", "0.527364", "0.52680546", "0.5260545", "0.52597386", "0.5250275", "0.52445805", "0.52445805", "0.52445805", "0.52445805", "0.52445805", "0.52445805", "0.52445805", "0.52445805", "0.52331346", "0.523098", "0.522573", "0.5223555", "0.52213347" ]
0.0
-1
Retry calling the decorated function using an exponential backoff.
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None): def deco_retry(f): @wraps(f) def f_retry(*args, **kwargs): mtries, mdelay = tries, delay while mtries > 1: try: return f(*args, **kwargs) except ExceptionToCheck, e: msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) if logger: logger.warning(msg) else: print msg time.sleep(mdelay) mtries -= 1 mdelay *= backoff return f(*args, **kwargs) return f_retry # true decorator return deco_retry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retry(tries, delay=3, backoff=2, except_on=(Exception, )):\n\n tries = math.floor(tries)\n\n def decorator(f):\n def f_retry(*args, **kwargs):\n return function_retry(\n tries, delay, backoff, except_on, f, *args, **kwargs)\n return f_retry # true decorator -> decorated function\n return decorator # @retry(arg[, ...]) -> true decorator", "def retry(func):\n # ... retry MAX_RETRIES times\n # ...\n # make sure you include this for testing:\n # except Exception as exc:\n # print(exc)\n # ...\n # and use wraps to preserve docstring\n #\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n tries = MAX_RETRIES\n while tries > 0:\n try:\n return func(*args, **kwargs)\n except Exception as err:\n print(err)\n\n tries -= 1\n\n raise MaxRetriesException\n\n return wrapper", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e", "def retry(exception, tries=10, delay=1, backoff=2, max_delay=30):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n m_tries, m_delay = tries, delay\n while m_tries > 1:\n try:\n return f(*args, **kwargs)\n except exception:\n time.sleep(min(m_delay, max_delay))\n m_tries -= 1\n m_delay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def auto_retry(fun):\n\n @functools.wraps(fun)\n def decorated(instance, *args, **kwargs):\n \"\"\"Wrapper around a decorated function.\"\"\"\n cfg = instance._retry_config\n remaining_tries = cfg.retry_attempts\n current_wait = cfg.retry_wait\n retry_backoff = cfg.retry_backoff\n last_error = None\n\n while remaining_tries >= 0:\n try:\n return fun(instance, *args, **kwargs)\n except socket.error as e:\n last_error = e\n instance._retry_logger.warning('Connection failed: %s', e)\n\n remaining_tries -= 1\n if remaining_tries == 0:\n # Last attempt\n break\n\n # Wait a bit\n time.sleep(current_wait)\n current_wait *= retry_backoff\n\n # All attempts failed, let's raise the last error.\n raise last_error\n\n return decorated", "def decorated(instance, *args, **kwargs):\n cfg = instance._retry_config\n remaining_tries = cfg.retry_attempts\n current_wait = cfg.retry_wait\n retry_backoff = cfg.retry_backoff\n last_error = None\n\n while remaining_tries >= 0:\n try:\n return fun(instance, *args, **kwargs)\n except socket.error as e:\n last_error = e\n instance._retry_logger.warning('Connection failed: %s', e)\n\n remaining_tries -= 1\n if remaining_tries == 0:\n # Last attempt\n break\n\n # Wait a bit\n time.sleep(current_wait)\n current_wait *= retry_backoff\n\n # All attempts failed, let's raise the last error.\n raise last_error", "def _retry_provider_call(self, func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n max_retries = 29\n attempts = 0\n while attempts < max_retries:\n try:\n return func(*args, **kwargs)\n except ClientError as e:\n attempts += 1\n raise RetryLimitExceededError(\n \"Exceeded request limit {} times. 
Aborting.\".format(max_retries)\n )\n return decorated", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def retry(nattempts, exception=None):\n \n def tryIt(func):\n def wrapper(*args, **kwargs):\n attempts = 0\n while attempts < nattempts - 1:\n try:\n return func(*args, **kwargs)\n except (exception if exception is not None else Exception):\n attempts += 1\n return func(*args, **kwargs)\n return wrapper\n return tryIt", "def retry(retries=5):\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n count = 0\n while True:\n try:\n return fn(*args, **kwargs)\n except (\n BadRequest,\n BadResponseException,\n ReadTimeout,\n RequestException,\n TraktBadGateway,\n TraktUnavailable,\n TraktInternalException,\n ) as e:\n if count == retries:\n logger.error(f\"Error: {e}\")\n\n if isinstance(e, BadResponseException):\n logger.error(f\"Details: {e.details}\")\n if isinstance(e, TraktInternalException):\n logger.error(f\"Error message: {e.error_message}\")\n\n logger.error(\n \"API didn't respond properly, script will abort now. Please try again later.\"\n )\n logger.error(\n f\"Last call: {fn.__module__}.{fn.__name__}({args[1:]}, {kwargs})\"\n )\n exit(1)\n\n seconds = 1 + count\n count += 1\n logger.warning(\n f\"{e} for {fn.__module__}.{fn.__name__}(), retrying after {seconds} seconds (try: {count}/{retries})\"\n )\n sleep(seconds)\n\n return wrapper\n\n return decorator", "def retrying(cls, fn, retries_allowed=None, wait_seconds=None, wait_increment=None, wait_multiplier=None):\n # A special name_key of 'anonymous' is the default, which causes there not to be a name key.\n # This cannot work in conjunction with RetryManager because different calls may result in different\n # function values at the same point in code. -kmp 8-Jul-2020\n decorator_function = Retry.retry_allowed(\n name_key='anonymous', retries_allowed=retries_allowed, wait_seconds=wait_seconds,\n wait_increment=wait_increment, wait_multiplier=wait_multiplier\n )\n return decorator_function(fn)", "def _retry(func):\n @wraps(func)\n def _retry_wrapper(self, *args, **kwargs):\n error_message = \"\"\n for retry in range(self.retries + 1):\n try:\n return func(self, *args, **kwargs)\n except ValueError as err:\n error_message = str(err)\n raise ValueError(str(error_message))\n return _retry_wrapper", "def retry_task(func):\n\n @wraps(func)\n def wrapper(task, *args, **kwargs):\n retries = task.request.retries\n exponential = 2 ** retries\n exponential_backoff = random.randint(exponential, exponential * 2)\n try:\n result = func(task, *args, **kwargs)\n except Exception as e:\n logger.error(\n f\"Retriying {task.request.id} after {exponential_backoff} seconds\"\n )\n raise task.retry(countdown=exponential_backoff, exc=e, max_retries=5)\n\n return result\n\n return wrapper", "def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. 
+ random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')", "def retry(exceptions=Exception, tries=3, delay=1):\n\n def retry_decorator(func):\n def func_wrapper(*args, **kwargs):\n _tries = tries\n while _tries:\n try:\n return func(*args, **kwargs)\n except exceptions as e:\n _tries -= 1\n if not _tries:\n raise\n\n time.sleep(delay)\n\n return func_wrapper\n\n return retry_decorator", "def retry(exception, tries=10, delay=3, backoff=0.1):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exception as ex:\n print \"{0}, Retrying in {1} seconds...\".format(ex, mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def retryable(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n retries = 0\n max_retries = kwargs.get(\"max_retries\", DEFAULT_RETRIES)\n backoff = kwargs.get(\"backoff\", default_backoff)\n while retries <= max_retries:\n try:\n return func(*args, **kwargs)\n except IntegrityError:\n logging.debug(\n \"Race-condition caught? ({}/{} retries)\".format(retries, max_retries)\n )\n if retries >= max_retries:\n logging.error(f\"Unable to execute {func}, max retries exceeded\")\n raise\n retries += 1\n backoff(retries, max_retries)\n\n return wrapper", "def retry(tries, delay=3, backoff=2):\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay # make mutable\n err = None\n while mtries > 0:\n print(\"Trial Number:\" + str(mtries))\n try:\n rv = f(*args, **kwargs)\n except DBException as e:\n print(\"Retry..\")\n mtries -= 1 # consume an attempt\n time.sleep(mdelay) # wait...\n mdelay += backoff # make future wait longer\n err = e\n\n # except Exception as e:\n # print(str(e))\n # mtries -= 1 # consume an attempt\n # time.sleep(mdelay) # wait...\n # mdelay += backoff # make future wait longer\n # err = e\n else:\n return rv\n raise err\n\n return f_retry # true decorator -> decorated function\n\n return deco_retry # @retry(arg[, ...]) -> true decorator", "def _retry(method, max_tries=5, backoff_s=1):\n\n @wraps(method)\n def method_with_retries(self, *args, **kwargs):\n try_count = 0\n while try_count < max_tries:\n try:\n return method(self, *args, **kwargs)\n except BrokenPipeError:\n logger.warning(\"Caught a BrokenPipeError. 
Retrying.\")\n try_count += 1\n if try_count < max_tries:\n self._construct_clients()\n time.sleep(backoff_s)\n else:\n raise\n\n return method_with_retries", "def retry(maxRetries, *exceptions):\n def _doDecoration(fn):\n def _doRetry(*args, **kwargs):\n retries = 0\n while retries <= maxRetries:\n try:\n return fn(*args, **kwargs)\n except tuple(exceptions):\n retries +=1\n if retries > maxRetries:\n raise\n \n return _doRetry\n return _doDecoration", "def url_socket_retry(func, *args, **kw):\n min_delay = 1\n max_delay = 32\n max_attempts = 4\n\n for idx, delay in enumerate(\n backoff_delays(min_delay, max_delay, jitter=True)):\n try:\n return func(*args, **kw)\n except HTTPError as err:\n if not (err.status == 503 and 'Slow Down' in err.reason):\n raise\n if idx == max_attempts - 1:\n raise\n except URLError as err:\n if not isinstance(err.reason, socket.error):\n raise\n if err.reason.errno not in (104, 110):\n raise\n if idx == max_attempts - 1:\n raise\n\n time.sleep(delay)", "def wrap(fn):\n\n def wrapped_fn(*args, **kwargs):\n \"\"\"The actual wrapper function that applies the retry logic.\"\"\"\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)\n\n return wrapped_fn", "def retry(\n self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs\n ) -> \"fn\":\n\n func = self._mod.retry(n, self, error=error, sleep=sleep)\n return func(*args, **kwargs)", "def retry(times: int, on_exceptions: List[Exception]):\n def decorator(function: Callable):\n @wraps(function)\n def wrapper(*args, **kwargs):\n raised = []\n for _ in range(times):\n try:\n return function(*args, **kwargs)\n except Exception as ex:\n raised.append(ex)\n if type(ex) not in on_exceptions:\n raise RetryError(\n 'An unexpected error occurred while calling the function '+\n f'{function.__name__}.'\n ) from ex\n raise raised.pop()\n return wrapper\n return decorator", "def retry(func, *args, **kwargs):\n @functools.wraps(func)\n def wrapper(*w_args, **w_kwargs):\n w_kwargs.update(kwargs)\n return retry_function_on_deadlock(func, *w_args, **w_kwargs)\n\n return wrapper", "def retry_multi(max_retries=5):\n\n def retry(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n num_retries = 0\n ret = None\n while num_retries <= max_retries:\n try:\n ret = func(*args, **kwargs)\n break\n except Exception as e:\n logger.exception(e)\n if num_retries == max_retries:\n raise\n num_retries += 1\n time.sleep(5)\n return ret\n\n return wrapper\n\n return retry", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n logging.warning('%s, Retrying in %d seconds...', str(e), mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry", "def test_retry(self):\n retries = [0]\n max_tries = 5\n\n @retry(Exception, max_retries=5)\n def f():\n retries[0] += 1\n raise Exception(\"Faulty function\")\n\n with self.assertRaises(Exception):\n f()\n\n self.assertEqual(max_tries, retries[0])", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n\tdef deco_retry(f):\n\t\t@wraps(f)\n\t\tdef f_retry(*args, **kwargs):\n\t\t\tmtries, mdelay = tries, delay\n\t\t\twhile 
mtries > 1:\n\t\t\t\ttry:\n\t\t\t\t\treturn f(*args, **kwargs)\n\t\t\t\texcept ExceptionToCheck, e:\n\t\t\t\t\tmsg = \"func: '{}' > exc: {}, Retrying in {} seconds...\".format(str(f.__name__), str(e), mdelay)\n\t\t\t\t\tif logger:\n\t\t\t\t\t\tlogger.warning(msg)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint msg\n\t\t\t\t\ttime.sleep(mdelay)\n\t\t\t\t\tmtries -= 1\n\t\t\t\t\tmdelay *= backoff\n\t\t\treturn f(*args, **kwargs)\n\t\treturn f_retry\t# true decorator\n\treturn deco_retry", "def retry(attempts_number, delay=0, step=0, max_delay=-1,\n retry_on=Exception, logger=None):\n\n def decorator(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n current_logger = logger\n\n attempts = 1\n retry_delay = delay\n\n try:\n if isinstance(args[0], object):\n current_logger = args[0].get_logger()\n except (AttributeError, IndexError):\n pass\n\n if isinstance(retry_on, (types.FunctionType,\n types.MethodType,)):\n catch_strategy = CatchFunctionStrategy(retry_on)\n else:\n catch_strategy = CatchExceptionStrategy(retry_on)\n\n while attempts <= attempts_number or attempts_number < 0:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if catch_strategy.need_to_retry(e):\n if attempts >= attempts_number >= 0:\n raise\n elif current_logger:\n retry_count = \"inf\" if attempts_number < 0 \\\n else attempts_number - 1\n\n current_logger.warning(\n \"Retry: Call to %(fn)s failed due to \"\n \"%(exc_class)s: %(exc)s, retry \"\n \"attempt #%(retry_no)s/\"\n \"%(retry_count)s after %(delay)ss\",\n dict(fn=func.__name__,\n exc=str(e),\n retry_no=attempts,\n exc_class=e.__class__.__name__,\n retry_count=retry_count,\n delay=retry_delay))\n time.sleep(retry_delay)\n attempts += 1\n retry_delay += step\n if 0 <= max_delay < retry_delay:\n retry_delay = max_delay\n else:\n raise\n return wrapper\n return decorator", "def test_retry_raises_error_on_negative_retries(self):\n\n @retry(Exception, max_retries=-1)\n def f():\n raise Exception(\"Faulty function\")\n\n self.assertRaises(ValueError, f)", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n 
return deco_retry", "def retry(exceptions, tries=3, delay=2, _logger=logger()):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n msg = '{}, Retrying in {} seconds...'.format(e, mdelay)\n _logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def _retry_refresh(wrapper, *a3, **k3):\n return func(wrapper, *a3, **k3)", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\r\n def deco_retry(f):\r\n\r\n @wraps(f)\r\n def f_retry(*args, **kwargs):\r\n mtries, mdelay = tries, delay\r\n while mtries > 1:\r\n try:\r\n return f(*args, **kwargs)\r\n except ExceptionToCheck as e:\r\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\r\n if logger:\r\n logger.warning(msg)\r\n else:\r\n print (msg)\r\n time.sleep(mdelay)\r\n mtries -= 1\r\n mdelay *= backoff\r\n return f(*args, **kwargs)\r\n return f_retry # true decorator\r\n return deco_retry", "def exp_backoff_fn(fn, *args):\n if not on_win:\n return fn(*args)\n\n import time\n import errno\n max_tries = 6 # max total time = 6.4 sec\n for n in range(max_tries):\n try:\n result = fn(*args)\n except (OSError, IOError) as e:\n if e.errno in (errno.EPERM, errno.EACCES):\n if n == max_tries - 1:\n raise Exception(\"max_tries=%d reached\" % max_tries)\n time.sleep(0.1 * (2 ** n))\n else:\n raise e\n else:\n return result", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def retry(callback, retries, sleep=0.5, catch=Exception, *args, **kwargs):\n r = 0\n while r < retries:\n r += 1\n try:\n return callback(*args, **kwargs)\n except catch as c:\n if r == retries:\n raise c\n else:\n time.sleep(r * sleep)", "def retry(retry_times=3, interval=0.5, exceptions=Exception):\n def _decorator(func):\n @wraps(func)\n def _wrapped_func(*args, **kwargs):\n for attempt in range(1, retry_times + 1):\n try:\n return func(*args, **kwargs)\n except exceptions: # pylint: disable=broad-except\n if attempt < retry_times:\n logger.debug(\"%s failed in No. %d attempt\", func, attempt)\n import traceback\n import time\n logger.debug(traceback.format_exc())\n time.sleep(interval)\n else:\n raise # End of retry. 
Re-raise the exception as-is.\n return _wrapped_func\n return _decorator", "def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)", "def smart_retry(f):\n # type: (Callable) -> CallableT\n\n @functools.wraps(f)\n def wrapper(api_instance, *args, **kwargs):\n # type: (UnifiAPI, *Any, **Any) -> Any\n try:\n return f(api_instance, *args, **kwargs)\n except Unauthorized as e:\n\n api_instance.log.debug(\n \"An exception occurred when executing %s: %s. Refreshing the connection to the Controller and retrying\",\n f.__name__,\n e,\n )\n api_instance.connect()\n return f(api_instance, *args, **kwargs)\n\n except Exception:\n raise\n\n return cast(CallableT, wrapper)", "def retriable(*retry_args, **retry_kwargs):\n def _retriable_factory(func):\n @wraps(func)\n def _retriable_wrapper(*args, **kwargs):\n return retry(func, args=args, kwargs=kwargs, *retry_args,\n **retry_kwargs)\n return _retriable_wrapper\n return _retriable_factory", "def _timeout_retry(func, *args, **kwargs):\n tried = kwargs.pop('_____retires', 0)\n try:\n q = func(*args, **kwargs)\n except (TimeoutError, TableParseError) as exc:\n if tried >= MAX_RETRIES_TIMEOUT:\n raise TimeoutError(f'TimeOut obtained in {MAX_RETRIES_TIMEOUT}'\n ' tries, aborting.') from exc\n return _timeout_retry(func, *args, **kwargs, _____retires=tried+1)\n return q", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except ExceptionToCheck, e:\n if logger:\n msg = getMessage(\"en\", \"retrying-notification\").format(str(e), mdelay)\n logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry # true decorator\n return deco_retry", "def wrapped_fn(*args, **kwargs):\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)", "def call_with_retries(function, max_retries=10,\n exception_types=(Exception),\n _args=(), _kwargs={}):\n assert max_retries >= 0\n\n retries = 0\n last_exc = Exception('Unknown exception')\n while retries <= max_retries:\n try:\n return function(*_args, **_kwargs)\n except exception_types as exc:\n retries += 1\n wait = 2.0 ** retries * 0.1 + (random.randint(0, 1000) / 1000)\n time.sleep(wait)\n last_exc = exc\n raise last_exc", "def retry(func, repeat=3, delay=tickTime * 2):\n\twhile repeat:\n\t\tresult = func()\n\n\t\tif result is None and delay and repeat != 1:\n\t\t\tsleep(delay)\n\n\t\telse:\n\t\t\treturn result\n\n\t\trepeat -= 1", "def execute_with_retry(f, args=[], kwargs={}, retry_on=(Exception,),\n max_tries=3, sleep=5):\n attempt = 0\n result = None\n while attempt < max_tries:\n attempt += 1\n try:\n result = f(*args, **kwargs)\n break\n except retry_on, e:\n if attempt >= max_tries:\n raise e\n log(\"Function call failed ('%s': %i/%i).\\n\"\n \"Reason: %s.\\n\"\n \"Wait for %i sec before retry...\"\n % (f.__name__, attempt, max_tries, str(e), sleep))\n time.sleep(sleep)\n return result", "def retry(exception_to_check, tries=4, delay=0.5, backoff=2):\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = 
True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except exception_to_check, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n logging.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry\n return deco_retry", "def retry_query(tries=3, delay=1):\n\n def retry_wrapper(func):\n \"\"\"Wrapper function.\n :params func: function to call\n :return: wrapper function\n \"\"\"\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n \"\"\"Inner wrapper function\n :params *args: list of different arguments\n *kwargs: dictionary of different arguments\n \"\"\"\n\n mtries = tries\n mdelay = delay\n\n while mtries:\n try:\n return func(*args, **kwargs)\n except Exception: # pylint: disable=broad-except\n if mtries:\n time.sleep(mdelay)\n mtries -= 1\n\n return inner\n\n return retry_wrapper", "def retry_call(\n callabl: Callable,\n args=None,\n kwargs=None,\n exceptions: Tuple[Any, ...] = (),\n retries: int = 10,\n wait: float = 0.1,\n) -> Any:\n\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n for attempt in range(1, retries + 1):\n try:\n return callabl(*args, **kwargs)\n except exceptions:\n if attempt < retries:\n time.sleep(wait)\n else:\n raise", "def retry_on_refuse(f, *args, **kwargs):\n i = 0\n while True:\n try:\n i += 1\n f(*args, **kwargs)\n break\n except (OSError, socket.error) as e:\n if e.args[0] != socket.errno.ECONNREFUSED or i > 10000:\n raise\n else:\n time.sleep(0.001)", "def retry_allowed(cls, name_key=None, retries_allowed=None, wait_seconds=None,\n wait_increment=None, wait_multiplier=None):\n\n def _decorator(function):\n function_name = name_key or function.__name__\n function_profile = cls.RetryOptions(\n retries_allowed=cls._defaulted(retries_allowed, cls.DEFAULT_RETRIES_ALLOWED),\n wait_seconds=cls._defaulted(wait_seconds, cls.DEFAULT_WAIT_SECONDS),\n wait_increment=cls._defaulted(wait_increment, cls.DEFAULT_WAIT_INCREMENT),\n wait_multiplier=cls._defaulted(wait_multiplier, cls.DEFAULT_WAIT_MULTIPLIER),\n )\n\n check_true(isinstance(retries_allowed, int) and retries_allowed >= 0,\n \"The retries_allowed must be a non-negative integer.\",\n error_class=ValueError)\n\n # See the 'retrying' method to understand what this is about. -kmp 8-Jul-2020\n if function_name != 'anonymous':\n cls._RETRY_OPTIONS_CATALOG[function_name] = function_profile # Only for debugging.\n\n @functools.wraps(function)\n def wrapped_function(*args, **kwargs):\n tries_allowed = function_profile.tries_allowed\n wait_seconds = function_profile.wait_seconds or 0\n last_error = None\n for i in range(tries_allowed):\n if i > 0:\n if i > 1:\n wait_seconds = function_profile.wait_adjustor(wait_seconds)\n if wait_seconds > 0:\n time.sleep(wait_seconds)\n try:\n success = function(*args, **kwargs)\n return success\n except Exception as e:\n last_error = e\n if last_error is not None:\n raise last_error\n\n return wrapped_function\n\n return _decorator", "def retry(exception_to_check=AssertionError, tries=100, delay=.1):\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 0:\n try:\n return f(*args, **kwargs)\n except exception_to_check, e:\n log.info('%s, Retrying in %s seconds...' 
% (str(e), mdelay))\n time.sleep(mdelay)\n mtries -= 1\n try_time = float(tries*delay)\n raise exception_to_check('tried for %1.1f seconds, gave up' % try_time)\n return f_retry\n return deco_retry", "def _RunWithRetries(self, callback, error_matcher):\n for i in xrange(FLAGS.gcloud_num_retries):\n try:\n return callback()\n except Exception as e: # pylint: disable=broad-except\n if not error_matcher(e):\n raise\n # Use randomized exponential backoff, like methods in\n # googleapiclient.http.\n retry_seconds = random.random() * 2**(i + 1)\n logging.warning('Request raised an error: %s\\n'\n 'Will retry in %f seconds.', e, retry_seconds)\n time.sleep(retry_seconds)\n\n return callback()", "def ensure_redis_call(f, *args, **kwargs):\n attempts = kwargs.pop('attempts', 5)\n\n for i in six.moves.range(attempts + 1):\n try:\n return f(*args, **kwargs)\n\n except (ConnectionError, TimeoutError) as e:\n if i == attempts:\n raise\n else:\n wait = 2 ** i\n msg = (\n 'Will reattempt to execute {} with args={} kwargs={} '\n 'after {} seconds due to exception {}: {}'\n ''.format(f, args, kwargs, wait, type(e).__name__, e)\n )\n print(msg)\n time.sleep(wait)", "def reprovision_and_retry(func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n errback = kw.get('errback', None)\n if errback is None:\n def errback(e):\n raise e\n def errback_wrapper(e):\n if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS:\n try:\n for initial in OPTIONS['INITIAL']:\n provision(*initial) # retry provisioning the initial setup\n func(*a, **kw) # and try the function once more\n except Exception(new_exc):\n errback(new_exc) # throwing the new exception\n else:\n errback(e) # not an instance of UnknownAppID - nothing we can do here\n kw['errback'] = errback_wrapper\n return func(*a, **kw)\n return wrapper", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def _retry_occurred(self):", "def backoff(start_sleep_time=0.1, border_sleep_time=30, factor=2, jitter=True):\n if start_sleep_time < 0.001:\n logger.warning('start_sleep_time fewer than 0.001 and will be set to 0.001')\n start_sleep_time = 0.001\n\n def decorator(target):\n @wraps(target)\n def retry(*args, **kwargs):\n attempt = 0\n while True:\n sleep_time = _sleep_time(start_sleep_time, border_sleep_time, factor, attempt, jitter)\n try:\n attempt += 1\n sleep(sleep_time)\n ret = target(*args, **kwargs)\n except Exception as e:\n logger.error(f'Exception is catched {e}')\n logger.warning(f'Wait fo {sleep_time} seconds and try again')\n else:\n return ret\n return retry\n return decorator", "def call_with_retries(function, retry_count, retry_delay):\n logger.info(\"Calling function: %s with retry count: %s, retry_delay: %s\",\n function, retry_count, retry_delay)\n for retry in range(1, int(retry_count) + 1):\n logger.info(\"Attempt number: %s\", retry)\n try:\n return function()\n # pylint: disable=broad-except\n except Exception as verify_exception:\n logger.info(\"Verify exception: %s\", verify_exception)\n time.sleep(float(retry_delay))\n if retry > int(retry_count):\n logger.info(\"Exceeded max retries! 
Reraising last exception\")\n raise\n assert False, \"Should never get here.\"", "def db_transaction_retry_wrapper(fn):\n @ft.wraps(fn)\n def f(self, *args, **kwargs):\n backoffGenerator = util.backoffSecondsGenerator()\n try:\n while True:\n try:\n result = fn(self, *args, **kwargs)\n return result\n except exceptions_eligible_for_retry:\n waitInSeconds = backoffGenerator.next()\n try:\n self.logger.critical('server failure in db transaction - '\n 'retry in %s seconds',\n waitInSeconds)\n except AttributeError:\n pass\n try:\n self.responsiveSleep(waitInSeconds,\n 10,\n \"waiting for retry after failure in db \"\n \"transaction\")\n except AttributeError:\n time.sleep(waitInSeconds)\n except KeyboardInterrupt:\n return\n return f", "def _retry(*, task, signature_kwargs, retries):\n if retries < MAX_RETRIES:\n step = task.signature(**signature_kwargs)\n queue = step.options.get(\"queue\", task.queue)\n step.options[\"queue\"] = f\"{queue}-delay\"\n step.kwargs[\"retries\"] = retries + 1\n on_commit(step.apply_async)\n else:\n raise MaxRetriesExceededError", "def retry(\n action_type: ActionType,\n default_return: Any,\n) -> Callable:\n\n def decorator(func: Callable) -> Callable:\n @wraps(func)\n def result(*args: Any, **kwargs: Any) -> List[Optional[Row]]:\n func_delay = config.execution.TASK_RETRY_DELAY\n method_name = func.__name__\n self = args[0]\n\n raised_ex = None\n for attempt in range(config.execution.TASK_RETRY_COUNT + 1):\n try:\n # Create ExecutionLog with status in_processing or retrying\n if attempt:\n self.log_retry(action_type)\n else:\n self.log_start(action_type)\n # Run access or erasure request\n return func(*args, **kwargs)\n except BaseException as ex: # pylint: disable=W0703\n func_delay *= config.execution.TASK_RETRY_BACKOFF\n logger.warning(\n f\"Retrying {method_name} {self.traversal_node.address} in {func_delay} seconds...\"\n )\n sleep(func_delay)\n raised_ex = ex\n self.log_end(action_type, raised_ex)\n return default_return\n\n return result\n\n return decorator", "def retry(initial_delay,\n max_delay,\n factor=2.0,\n jitter=0.25,\n is_retriable=None):\n if factor < 1:\n raise ValueError('factor must be >= 1; was %f' % (factor,))\n\n if jitter >= 1:\n raise ValueError('jitter must be < 1; was %f' % (jitter,))\n\n # Generator to compute the individual delays\n def delays():\n delay = initial_delay\n while delay <= max_delay:\n yield delay * random.uniform(1 - jitter, 1 + jitter)\n delay *= factor\n\n def wrap(fn):\n \"\"\"Wrapper function factory invoked by decorator magic.\"\"\"\n\n def wrapped_fn(*args, **kwargs):\n \"\"\"The actual wrapper function that applies the retry logic.\"\"\"\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)\n\n return wrapped_fn\n\n return wrap", "def keep_run(exception_sleep=10):\n\n def decorated(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n logging.exception(e)\n if exception_sleep > 0:\n time.sleep(exception_sleep)\n\n return wrapped\n\n return decorated", "def retry_after(self, delay: float, request_method: Callable, *args: Any, **kwargs: Any) -> 'NetworkResponse':\n raise NotImplementedError # pragma: no cover", "def _download_retry(self, product, wait, timeout):\n\n def decorator(download):\n def download_and_retry(*args, **kwargs):\n # initiate retry loop\n 
start_time = datetime.now()\n stop_time = start_time + timedelta(minutes=timeout)\n product.next_try = start_time\n retry_count = 0\n not_available_info = \"The product could not be downloaded\"\n # another output for notebooks\n nb_info = NotebookWidgets()\n\n while \"Loop until products download succeeds or timeout is reached\":\n\n datetime_now = datetime.now()\n\n if datetime_now >= product.next_try:\n product.next_try += timedelta(minutes=wait)\n try:\n return download(*args, **kwargs)\n\n except NotAvailableError as e:\n if not getattr(self.config, \"order_enabled\", False):\n raise NotAvailableError(\n f\"Product is not available for download and order is not supported for\"\n f\" {self.provider}, {e}\"\n )\n not_available_info = e\n pass\n\n if datetime_now >= product.next_try and datetime_now < stop_time:\n wait_seconds = (\n datetime_now - product.next_try + timedelta(minutes=wait)\n ).seconds\n retry_count += 1\n retry_info = (\n f\"[Retry #{retry_count}] Waited {wait_seconds}s, trying again to download ordered product\"\n f\" (retry every {wait}' for {timeout}')\"\n )\n logger.debug(not_available_info)\n # Retry-After info from Response header\n if hasattr(self, \"stream\"):\n retry_server_info = self.stream.headers.get(\n \"Retry-After\", \"\"\n )\n if retry_server_info:\n logger.debug(\n f\"[{self.provider} response] Retry-After: {retry_server_info}\"\n )\n logger.info(retry_info)\n nb_info.display_html(retry_info)\n product.next_try = datetime_now\n elif datetime_now < product.next_try and datetime_now < stop_time:\n wait_seconds = (product.next_try - datetime_now).seconds + (\n product.next_try - datetime_now\n ).microseconds / 1e6\n retry_count += 1\n retry_info = (\n f\"[Retry #{retry_count}] Waiting {wait_seconds}s until next download try\"\n f\" for ordered product (retry every {wait}' for {timeout}')\"\n )\n logger.debug(not_available_info)\n # Retry-After info from Response header\n if hasattr(self, \"stream\"):\n retry_server_info = self.stream.headers.get(\n \"Retry-After\", \"\"\n )\n if retry_server_info:\n logger.debug(\n f\"[{self.provider} response] Retry-After: {retry_server_info}\"\n )\n logger.info(retry_info)\n nb_info.display_html(retry_info)\n sleep(wait_seconds)\n elif datetime_now >= stop_time and timeout > 0:\n if \"storageStatus\" not in product.properties:\n product.properties[\"storageStatus\"] = \"N/A status\"\n logger.info(not_available_info)\n raise NotAvailableError(\n f\"{product.properties['title']} is not available ({product.properties['storageStatus']})\"\n f\" and could not be downloaded, timeout reached\"\n )\n elif datetime_now >= stop_time:\n raise NotAvailableError(not_available_info)\n\n return download(*args, **kwargs)\n\n return download_and_retry\n\n return decorator", "def query_retry(self, f, *args, **kwargs):\n\n num_retries = CONF.watcher_datasources.query_max_retries\n timeout = CONF.watcher_datasources.query_timeout\n for i in range(num_retries):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n LOG.exception(e)\n self.query_retry_reset(e)\n LOG.warning(\"Retry {0} of {1} while retrieving metrics retry \"\n \"in {2} seconds\".format(i+1, num_retries, timeout))\n time.sleep(timeout)", "def i2c_retry(n):\n def decorator(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(n-1):\n try:\n return func(*args, **kwargs)\n except OSError:\n time.sleep(0.05) # <-- allow the I2C bus to chill-out before we try again\n return func(*args, **kwargs)\n\n return func_wrapper\n\n return decorator", "def 
_retryProtect(m):\n\tdef f(self, *args, **kwargs):\n\t\ttry:\n\t\t\treturn m(self, *args, **kwargs)\n\t\texcept:\n\t\t\tself.reset()\n\t\t\treturn m(self, *args, **kwargs)\n\n\treturn functools.update_wrapper(f, m)", "def retry_request(\n self,\n tapi_exception,\n error_message,\n repeat_number,\n response,\n request_kwargs,\n api_params,\n **kwargs\n ):\n return False", "def retry_on_deadlock(func):\n @functools.wraps(func)\n def decorate(*args, **kw):\n # We can't use RetryDecorator from oslo_service directly because\n # it runs a decorated function in a different thread and hence\n # the function doesn't have access to authentication context\n # set as a thread local variable.\n # The solution is to reuse RetryDecorator but explicitly set\n # auth context in the new thread that RetryDecorator spawns.\n # In order to do that we need an additional helper function.\n\n auth_ctx = ctx.ctx() if ctx.has_ctx() else None\n\n return _with_auth_context(auth_ctx, func, *args, **kw)\n\n return decorate", "def retry_on_exception(func, num_tries=40, period_in_seconds=DEFAULT_PERIOD,\n error=None):\n for x in range(num_tries):\n try:\n return func()\n except Exception as e:\n if error and e.error_code == error:\n logging.info(\"Skipping on exception %s\" % error)\n break\n if x == (num_tries - 1):\n raise RuntimeError(\"Failed on %d tries: %s\" % (num_tries, e))\n logging.info(\"Got exception %s on try number %s...\" % (e, x))\n\n time.sleep(period_in_seconds)", "def set_retry_timeout(self, retry_timeout):", "def retry(times: int, except_callback: Optional[Callable[..., Any]] = None):\n\n def wrap(func):\n @wraps(func)\n def retry_it(*args, **kwargs):\n nonlocal times\n if times < 0: # forever\n times = 1 << 32\n\n for i in range(1, times + 1):\n try:\n r = func(*args, **kwargs)\n return r\n except Exception as err:\n if except_callback is not None:\n except_callback(err, i)\n\n if i == times:\n raise err\n\n return retry_it\n\n return wrap", "def retry_on_exception(func, max_attempts=5, ignored_exceptions=(StaleElementReferenceException, InvalidElementStateException)):\r\n attempt = 0\r\n while attempt < max_attempts:\r\n try:\r\n return func()\r\n except ignored_exceptions:\r\n world.wait(1)\r\n attempt += 1\r\n\r\n assert_true(attempt < max_attempts, 'Ran out of attempts to execute {}'.format(func))", "def _retry_request(self, request, timeout=2, attempts=3):\n import googleapiclient\n\n try:\n return request.execute()\n except BrokenPipeError as ex:\n if attempts > 0:\n time.sleep(timeout)\n return self._retry_request(request, timeout * 2, attempts - 1)\n raise ex\n except googleapiclient.errors.HttpError as ex:\n log_verbose_traceback(ex)\n raise ex\n except Exception as ex:\n log_verbose_traceback(ex)\n raise ex", "def retry(self, envelope):\n # type: (RetryPolicy, Envelope) -> None\n raise NotImplementedError()", "def test_retry_run(self):\n pass", "def __call__(self, func, *args):\n\n def wrapped_func(*args, **kwargs):\n\n count = 0\n while True:\n response = func(*args, **kwargs)\n if response.status_code in range(200, 300):\n return response\n elif response.status_code >= 500:\n if count == self.retry_count:\n return response\n else:\n time.sleep(pow(2, count))\n count += 1\n continue\n else:\n return response\n\n return wrapped_func", "def _backoff_handler(details):\n LOGGER.debug('[Backoff]: Trying again in %f seconds after %d tries calling %s',\n details['wait'],\n details['tries'],\n details['target'].__name__)", "def sleep_decorator(function):\n\n def wrapper(*args, **kwargs):\n 
sleep(2)\n return function(*args, **kwargs)\n return wrapper", "def _retry(self, f):\n count = 0\n while True:\n try:\n return f()\n # http://initd.org/psycopg/docs/module.html#psycopg2.DatabaseError\n # handle operational error - memory allocation, unexpected disconnect\n except psycopg2.OperationalError, oe:\n count += 1\n if count < self._max_retries:\n LOGGER.warn(\"Transient Error Received %s \", oe)\n time.sleep(self._retry_period)\n else:\n LOGGER.error(\"Unrecoverable Error %s\", oe)\n raise oe\n # other database errors - integrity, internal, programming error etc\n except psycopg2.DatabaseError, de:\n LOGGER.error(\"Database Error %s\", de)\n raise de\n # interface errors\n except psycopg2.Error, e:\n raise e", "async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. URL can not be navigated.\")", "def retry_exception(num, delay, func, exception=Exception, *args, **kwargs):\n i = 0\n while i <= num:\n try:\n func(*args, **kwargs)\n time.sleep(delay)\n except exception: # pylint: disable=broad-except\n i += 1\n continue\n return\n raise StopIteration(\"Function did not finished successfully\")", "def _retry(self, result, method, url, params_dict, **kwargs):\n return result", "def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3", "def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)", "def test_retry_other_exception():\n\n exceptions_in = [\n RuntimeError(\"what?\"),\n NotImplementedError(\"how?\"),\n RuntimeError(\"no!\"),\n ]\n actual = []\n expected = [1.0, 1.5, 2.25]\n\n def sleep(wait: float):\n actual.append(wait)\n\n @retry(\n (NotImplementedError, RuntimeError),\n retries=4,\n delay=1.0,\n backoff=1.5,\n sleep=sleep,\n )\n def explode():\n raise exceptions_in.pop()\n\n try:\n explode()\n raise AssertionError(\"IndexError expected\")\n except IndexError:\n assert actual == expected", "def _retry_on_exception(\n exception: Union[Exception, Tuple[Exception]],\n regex: Optional[str] = None,\n max_retries: int = MAX_POLLS,\n retry_interval_s: int = POLL_INTERVAL,\n):\n\n def dec(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n def try_catch_exc():\n try:\n value = func(*args, **kwargs)\n return value\n except Exception as e:\n if not isinstance(e, exception) or (\n regex and not re.search(regex, str(e))\n ):\n raise e\n return e\n\n for _ in range(max_retries):\n ret = try_catch_exc()\n if not isinstance(ret, Exception):\n break\n time.sleep(retry_interval_s)\n if isinstance(ret, Exception):\n raise ret\n return ret\n\n return wrapper\n\n return dec", "def retry_strategy(self, retry_strat):\n self.retry_strategy = retry_strat\n return self", "def retry(action, attempts=5, sleeptime=60, max_sleeptime=5 * 60,\n sleepscale=1.5, jitter=1, retry_exceptions=(Exception,),\n cleanup=None, args=(), kwargs={}, log_args=True):\n assert callable(action)\n assert not cleanup or callable(cleanup)\n\n action_name = getattr(action, '__name__', action)\n if log_args and (args or kwargs):\n log_attempt_args = 
(\"retry: calling %s with args: %s,\"\n \" kwargs: %s, attempt #%d\",\n action_name, args, kwargs)\n else:\n log_attempt_args = (\"retry: calling %s, attempt #%d\",\n action_name)\n\n if max_sleeptime < sleeptime:\n log.debug(\"max_sleeptime %d less than sleeptime %d\",\n max_sleeptime, sleeptime)\n\n n = 1\n for _ in retrier(attempts=attempts, sleeptime=sleeptime,\n max_sleeptime=max_sleeptime, sleepscale=sleepscale,\n jitter=jitter):\n try:\n logfn = log.info if n != 1 else log.debug\n logfn_args = log_attempt_args + (n, )\n logfn(*logfn_args)\n return action(*args, **kwargs)\n except retry_exceptions:\n log.debug(\"retry: Caught exception: \", exc_info=True)\n if cleanup:\n cleanup()\n if n == attempts:\n log.info(\"retry: Giving up on %s\", action_name)\n raise\n continue\n finally:\n n += 1", "def backoff(\n max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,\n delay=constants.BACKOFF_DEFAULT_DELAY,\n factor=constants.BACKOFF_DEFAULT_FACTOR,\n exception_handler=always_retry,\n before_delay_handler=noop,\n after_delay_handler=noop):\n if max_tries <= 0:\n raise ValueError((\n 'Max tries must be greater than 0; got {!r}'\n ).format(max_tries))\n\n if delay <= 0:\n raise ValueError((\n 'Delay must be greater than 0; got {!r}'\n ).format(delay))\n\n if factor <= 1:\n raise ValueError((\n 'Backoff factor must be greater than 1; got {!r}'\n ).format(factor))\n\n def outter(f):\n def inner(*args, **kwargs):\n m_max_tries, m_delay = max_tries, delay # make mutable\n while m_max_tries > 0:\n try:\n retval = f(*args, **kwargs)\n except Exception as ex:\n m_max_tries -= 1 # consume an attempt\n if m_max_tries < 0:\n # run out of tries\n raise\n if exception_handler(ex):\n logger.info(\n (\n 'backoff retry for: %r (max_tries=%r, '\n 'delay=%r, factor=%r)'\n ),\n f,\n max_tries,\n delay,\n factor\n )\n before_delay_handler(ex)\n time.sleep(m_delay) # wait...\n after_delay_handler(ex)\n m_delay *= factor # make future wait longer\n else:\n # exception handler gave up\n raise\n else:\n # done without errors\n return retval\n return inner\n return outter", "def throttle(f):\n def wrapper(self, *args, **kwargs):\n if self.made_requests < self.max_requests:\n time.sleep(self.delay)\n f(self, *args, **kwargs)\n self.made_requests += 1\n else:\n raise Exception, 'maximum request limit reached'\n return wrapper", "def retry_on_bad_auth(func):\n @wraps(func)\n def retry_version(self, *args, **kwargs):\n while True:\n try:\n return func(self, *args, **kwargs)\n except trolly.ResourceUnavailable:\n sys.stderr.write('bad request (refresh board id)\\n')\n self._board_id = None\n self.save_key('board_id', None)\n except trolly.Unauthorised:\n sys.stderr.write('bad permissions (refresh token)\\n')\n self._client = None\n self._token = None\n self.save_key('token', None)\n return retry_version", "def __call__(self, func):\n timeouts = _exponential_timeout_generator(\n self._initial, self._maximum, self._multiplier, self._deadline)\n\n @general_helpers.wraps(func)\n def func_with_timeout(*args, **kwargs):\n \"\"\"Wrapped function that adds timeout.\"\"\"\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)\n\n return func_with_timeout" ]
[ "0.78631854", "0.7701622", "0.7667836", "0.7597426", "0.7546011", "0.75036615", "0.7499006", "0.74785614", "0.7439682", "0.74343145", "0.7429641", "0.74183977", "0.7401052", "0.73962057", "0.7395584", "0.73916775", "0.7338477", "0.7319612", "0.7313288", "0.7284679", "0.7164803", "0.71647084", "0.7122171", "0.71126074", "0.71059716", "0.70868623", "0.7056256", "0.7040827", "0.7015611", "0.7015428", "0.6987999", "0.69766074", "0.69751436", "0.6970963", "0.6966789", "0.69667244", "0.69037616", "0.68991053", "0.6896755", "0.6893039", "0.6870732", "0.68627405", "0.6860361", "0.68177336", "0.68096095", "0.68025106", "0.67988", "0.67674893", "0.6743596", "0.6737118", "0.6700426", "0.669805", "0.66888857", "0.6668681", "0.6665215", "0.6612386", "0.6588766", "0.6582566", "0.65811694", "0.6564233", "0.6559975", "0.65419704", "0.6517133", "0.6495875", "0.6471232", "0.64694184", "0.6462243", "0.64238995", "0.6413777", "0.64134425", "0.6409733", "0.6402356", "0.63983405", "0.63948476", "0.6381794", "0.63628477", "0.63552517", "0.63433975", "0.63337904", "0.62949985", "0.6288851", "0.6272235", "0.62547535", "0.62498933", "0.6249862", "0.6245726", "0.6235993", "0.6227074", "0.6222807", "0.61982155", "0.6186421", "0.61850315", "0.6184149", "0.6182769", "0.61729044", "0.6154224", "0.610365", "0.6073795", "0.6065072", "0.6065003" ]
0.6932827
36
Downloads a FASTA file for the proteome by organism ID
def get_fasta_by_id(proteome_id, output_file):
    taxid_pattern = re.compile(r'^\d{1,7}$')
    # if not taxid_pattern.match(proteome_id):
    # fetch file from Uniprot
    # raise ValueError(str(proteome_id) + ' is not a valid proteome identifier')
    url = UNIPROT_BASE_URL + proteome_id
    attempts = 0
    # retry the download up to three times before giving up
    while attempts < 3:
        try:
            response = requests.get(url)
            if response.status_code > 399 or response.status_code < 200:
                raise requests.HTTPError(str(response.status_code) + ': ' + response.content)
            content = response.content
            if len(content) < 10:
                raise FastaNotFoundError()
            with open(output_file, 'w') as f:
                f.write(content)
            break
        except requests.HTTPError as e:
            attempts += 1
            if attempts >= 3:
                raise FastaNotFoundError('Failed to download fasta: ' + str(response.status_code) + ' ' + response.content)
    return output_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seq_download(name, organism=\"Homo sapiens\", gaba=False):\n\n subunits = {\n \"Alpha-1\": \"Gabra1\",\n \"Alpha-2\": \"Gabra2\",\n \"Alpha-3\": \"Gabra3\",\n \"Alpha-4\": \"Gabra4\",\n \"Alpha-5\": \"Gabra5\",\n \"Alpha-6\": \"Gabra6\",\n \"Beta-1\": \"Gabrb1\",\n \"Beta-2\": \"Gabrb2\",\n \"Beta-3\": \"Gabrb3\",\n \"Gamma-1\": \"Gabrg1\",\n \"Gamma-2\": \"Gabrg2\",\n \"Gamma-3\": \"Gabrg3\",\n \"Delta\": \"Gabrd\",\n \"Pi\": \"Gabrp\",\n \"Rho-1\": \"Gabrr1\",\n \"Rho-2\": \"Gabrr2\",\n \"Rho-3\": \"Gabrr3\",\n \"Epsilon\": \"Gabre\",\n \"Theta\": \"Gabrq\"\n }\n if gaba:\n results = search(subunits[name])\n else:\n results = search(name)\n results = results[results[\"Organism\"].str.contains(organism, na=False)]\n if len(results):\n if gaba:\n target = results[results[\"Gene names\"].str.contains(subunits[name].upper())][\"Entry\"].max()\n else:\n target = results[results[\"Gene names\"].str.contains(name)][\"Entry\"].max()\n response = urlopen(f\"https://www.uniprot.org/uniprot/{target}.fasta\").read().decode(\"utf-8\")\n with open(\"Temp_seq.fasta\", \"w\") as file:\n file.write(response)\n seq = SeqIO.read(\"Temp_seq.fasta\", \"fasta\")\n os.remove(\"Temp_seq.fasta\")\n\n return seq\n\n else:\n return -1", "def download_proteome(proteome_id, data_dir, domain=\"Eukaryota\"):\n base = (\"ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/\"\n \"knowledgebase/reference_proteomes\")\n\n url = [base, domain, proteome_id + \".fasta.gz\"]\n outfile = os.path.join(data_dir, proteome_id + \".fasta\")\n\n with closing(request.urlopen(url)) as remote_handle:\n with open(remote_handle, \"rb\") as remote_file:\n mem_file = io.BytesIO(remote_file.read())\n\n with open(outfile, \"w\") as out, gzip.open(mem_file) as gz:\n outfile.write(gz.read())\n\n return outfile", "def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))", "def download(dataset_csv_path='ncbi_ids.csv', save_path='../data/RefSeq'):\n\n Entrez.email = \"your_email@gmail.com\"\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n with open(dataset_csv_path, 'r') as f:\n data = csv.reader(f)\n for row in data:\n microbe_id = row[0].split('.')[0]\n if os.path.exists(os.path.join(save_path, microbe_id + '.fasta')):\n continue\n\n handle = Entrez.efetch(db=\"nucleotide\", id=microbe_id,\n rettype=\"fasta\", retmode=\"text\")\n record = SeqIO.read(handle, \"fasta\")\n handle.close()\n SeqIO.write(record, os.path.join(save_path, microbe_id + \".fasta\"),\n \"fasta\")", "def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"ks2074@gmail.com\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()", "def get_genome_download_link(self, name, mask=\"soft\", **kwargs):\n genome = self.genomes[safe(name)]\n division, is_vertebrate = self.get_division(name)\n\n # base directory of the genome\n ftp = \"http://ftp.ensemblgenomes.org\"\n if is_vertebrate:\n ftp = \"http://ftp.ensembl.org\"\n version = self.get_version(name, kwargs.get(\"version\"))\n div_path = \"\" if is_vertebrate else f\"/{division}\"\n lwr_name = genome[\"name\"]\n ftp_directory = 
f\"{ftp}/pub/release-{version}{div_path}/fasta/{lwr_name}/dna\"\n\n # this assembly has its own directory\n if name == \"GRCh37\":\n ftp_directory = genome[\"genome\"].format(version)\n\n # specific fasta file\n cap_name = lwr_name.capitalize()\n asm_name = re.sub(r\"\\.p\\d+$\", \"\", safe(genome[\"assembly_name\"]))\n mask_lvl = {\"soft\": \"_sm\", \"hard\": \"_rm\", \"none\": \"\"}[mask]\n asm_lvl = \"toplevel\" if kwargs.get(\"toplevel\") else \"primary_assembly\"\n version_tag = \"\" if version > 30 else f\".{version}\"\n\n ftp_file = f\"{cap_name}.{asm_name}{version_tag}.dna{mask_lvl}.{asm_lvl}.fa.gz\"\n\n # combine\n link = f\"{ftp_directory}/{ftp_file}\"\n if check_url(link, 2):\n return link\n\n # primary assemblies do not always exist\n if asm_lvl == \"primary_assembly\":\n link = link.replace(\"primary_assembly\", \"toplevel\")\n if check_url(link, 2):\n return link\n\n raise GenomeDownloadError(\n f\"Could not download genome {name} from {self.name}.\\n\"\n \"URL is broken. Select another genome or provider.\\n\"\n f\"Broken URL: {link}\"\n )", "def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None", "def fetch_sequence(sequence_id, database='uniprot'):\n if sequence_id.startswith('UPI'):\n database = 'uniparc'\n url_template = 'http://www.uniprot.org/uniparc/{}.fasta'\n elif sequence_id.startswith('UniRef'):\n database = 'uniref'\n url_template = 'http://www.uniprot.org/uniref/{}.fasta'\n else:\n database = 'uniprot'\n url_template = 'http://www.uniprot.org/uniprot/{}.fasta'\n\n url = url_template.format(sequence_id)\n logger.debug('Downloading sequence {} from {}...'.format(sequence_id, url))\n\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Failed to fetch sequence with return code: {}\".format(r.status_code))\n\n seq = Bio.SeqIO.read(io.StringIO(r.text), 'fasta')\n if database == 'uniprot':\n seq.annotations['db'], seq.id, seq.name = re.split('[\\| ]', seq.id)\n return seq", "def get_assemblies_link_from_accession_number(term):\n ###########print('+++++++',term)\n # provide your own mail here # I wrote the email at the begining of the codes\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax=\"200\")\n record = Entrez.read(handle)\n ids = record[\"IdList\"]\n links = []\n for aid in ids:\n summary = get_id_give_assembly_summary(aid) # get summary\n url = summary[\"DocumentSummarySet\"][\"DocumentSummary\"][0][\"FtpPath_RefSeq\"]\n if url == \"\":\n continue\n label = os.path.basename(url)\n # get the fasta link - change this to get other formats\n link = url + \"/\" + label + \"_genomic.fna.gz\"\n link = link.replace(\"ftp://\", \"https://\")\n links.append(link)\n \n #############print('=======', links)\n return links", "def get_protein_fasta(uniprot_id):\r\n url = \"http://www.uniprot.org/uniprot/{}.fasta\".format(uniprot_id)\r\n string = re.split(\"\\n\",ur.urlopen(url).read().decode(),1)[1]\r\n return re.sub(\"\\n\",\"\",string)", "def fetch_genome(reference_name):\n from utils import script_dir\n genome_list = yaml.load(open(script_dir + \"/utils/genomes.yaml\",\"r\"))\n 
makedir(\"genomes\")\n if reference_name not in genome_list:\n msg(\"Reference Genome not available\", \"error\")\n ftp_loc = genome_list[reference_name]\n filename = os.path.split(ftp_loc)[1]\n makedir(\"{script_dir}/genomes/{reference_name}\".format(**locals()))\n reference_loc = \"{script_dir}/genomes/{reference_name}/{filename}\".format(**locals())\n if not file_exists( reference_loc + \".sa\"):\n print(\"Downloading {filename}\".format(**locals()))\n os.system(\"curl {ftp_loc} > {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n # Unzip and rezip with bgzip\n if filename.endswith(\".gz\"):\n os.system(\"gunzip {reference_loc} && bgzip {reference_loc2}\".format(reference_loc=reference_loc, reference_loc2=reference_loc.replace(\".gz\",\"\")))\n print(\"Indexing {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n os.system(\"bwa index {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n else:\n msg(\"Reference Already downloaded and indexed.\", \"error\")", "def get_assemblies(term, download=True, path='assemblies'):\n\n from Bio import Entrez\n #provide your own mail here\n Entrez.email = \"A.N.Other@example.com\"\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax='200')\n record = Entrez.read(handle)\n ids = record['IdList']\n print (f'found {len(ids)} ids')\n links = []\n for id in ids:\n #get summary\n summary = get_assembly_summary(id)\n #get ftp link\n url = summary['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']\n if url == '':\n continue\n label = os.path.basename(url)\n #get the fasta link - change this to get other formats\n link = os.path.join(url,label+'_genomic.fna.gz')\n print (link)\n links.append(link)\n if download == True:\n #download link\n urllib.request.urlretrieve(link, f'{label}.fna.gz')\n return links", "def test_ncbi_sequence_info_download(self):\n\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_download\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n\n # Simulate download from local files (nucl_gb and species_genome_size)\n params[\"ncbi_url\"] = \"file://\" + os.path.abspath(data_dir) + \"/build-custom/remote/\"\n params[\"ncbi_sequence_info\"] = [\"nucl_gb\"]\n\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")", "def download_file(id, output=DATA_DIR, quiet=False):\n url = f\"https://drive.google.com/uc?id={id}\"\n gdown.download(url, output=output, quiet=quiet)", "def download_proteins(proteins, data_dir, fileroot=\"uniprot\"):\n uniprot = bioservices.UniProt()\n outfile = os.path.join(data_dir, fileroot + \".fasta\")\n with open(outfile, \"w\") as fasta_out:\n lines = uniprot.retrieve(proteins, frmt=\"fasta\")\n lines = \"\".join(lines)\n fasta_out.write(lines)\n\n return outfile", "def download_it(fw, acquisition, file_name, input_path):\n\n safe = make_file_name_safe(file_name, replace_str='_')\n\n full_path = input_path + safe\n\n if acquisition.timestamp:\n if acquisition.timezone:\n created = acquisition.original_timestamp.isoformat()\n else:\n created = acquisition.timestamp.isoformat()\n else:\n created = 'unknown'\n\n rpt = 1\n while full_path in context.gear_dict['niftis']: # then repeated name\n 
full_path = input_path + str(rpt) + '_' + safe\n rpt += 1\n\n if os.path.isfile(full_path):\n log.info('File exists ' + file_name + ' -> ' +\\\n full_path + ' created ' + created)\n else:\n log.info('Downloading ' + file_name + ' -> ' +\\\n full_path + ' created ' + created)\n acquisition.download_file(file_name, full_path)\n\n full_file = fw.get_acquisition_file_info(acquisition.id, file_name)\n field_strength = full_file.info.get('MagneticFieldStrength')\n\n context.gear_dict['niftis'].append(full_path)\n context.gear_dict['file_names'].append(file_name)\n context.gear_dict['createds'].append(created)\n context.gear_dict['field_strength'].append(field_strength)", "def _download_single(url, to, id):\n if os.path.exists(to):\n error_flags[id] = 1\n return\n\n try:\n request = rq.Request(url=url, headers=forge_agent_header)\n info = rq.urlopen(request).read()\n\n except urllib.error.URLError as e:\n print(url, 'urllib error')\n error_flags[id] = 2\n return\n\n except Exception as e:\n print(url, e)\n error_flags[id] = 2\n return\n\n with open(to, \"wb\") as file:\n print(url, 'writing')\n file.write(info)\n\n error_flags[id] = 1", "def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def download_epubs(epub_file, outdir=None, sep='|'):\n \n if not outdir:\n outdir = epub_file.split('.')[0]\n if not os.path.exists(outdir):\n os.mkdir(outdir) \n \n print(\"Downloading files to\", outdir)\n with open(epub_file, 'r') as file:\n for line in file.readlines():\n row = line.split(sep)\n gid = row[0]\n try:\n int(gid) \n url = gut_utf8.format(gid)\n r = requests.get(url)\n filename = '_'.join(row[1:3]).strip()\n filename = re.sub(r'\\W+', '_', filename)\n filename = re.sub(r'_+', '_', filename)\n print(gid, filename) \n with open(\"{}/{}-pg{}.txt\".format(outdir, filename, gid), 'w') as outfile:\n outfile.write(r.text)\n except ValueError as e:\n print('#', gid, \"not a GID\")", "def download_SRA(SRA):\n\n print(\"Downloading SRA archive\")\n output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)\n\n print(\"Extracting FASTQ data\")\n output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT)", "def download_assignments(opener, fasta_fname, interval=3):\n params = {\"file\" : open(fasta_fname, \"rb\") }\n #submit and refresh until processed\n result = opener.open(rdp_base+servlet, params)\n while is_processing(result):\n sleep(interval)\n result = opener.open(rdp_base + check_page)\n\n #download the detailed text result\n result = opener.open(rdp_base + get_download_url(result))\n return result", "def getGenomeSequence(genomeId):\n \n r = urllib.urlopen(PatricURL+genomeId+'/'+genomeId+'.fna').read()\n soup = BeautifulSoup(r)\n #print type(soup)\n\n genomeSequence = 
soup.prettify().split('| '+genomeId+']')[1]\n return genomeSequence.replace('\\n', '')", "def download_index(gaia_index):\n # Create regex to extract URL and file name\n reFile = re.compile(r'<a href=\"(.*(GaiaSource.*gz))\"\\>')\n # Open Gaia HTML index file\n response = urllib.request.urlopen(gaia_index)\n # Read content\n files = []\n page = response.readlines()\n # Extract URLs from the page\n for line in page:\n line = line.decode('utf-8')\n # Extract URLs\n f = reFile.findall(line)\n if (f):\n f = f[0]\n if (f[0].startswith('http')):\n # Absolute path\n files.append((f[0], f[1]))\n else:\n # Relative path\n files.append((urljoin(gaia_index, f[0]), f[1]))\n if len(files) == 0:\n print(f\"Couldn't extract file names from the index page.\\nCheck URL: {gaia_index}\")\n exit(1)\n return files", "def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")", "def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)", "def dascasi_download():\n p = argparse.ArgumentParser(description=\"download DASC all-sky camera data\")\n p.add_argument(\"site\", choices=[\"EAA\", \"FYU\", \"KAK\", \"PKR\", \"TOO\", \"VEE\"])\n p.add_argument(\n \"startend\", help=\"start/end times UTC e.g. 
2012-11-03T06:23 2012-11-03T07\", nargs=2\n )\n p.add_argument(\"odir\", help=\"directory to write downloaded FITS to\")\n p.add_argument(\"-w\", \"--wavelen\", help=\"request specific wavelength(s)\", nargs=\"+\")\n p.add_argument(\"-host\", default=\"ftp://optics.gi.alaska.edu\")\n p = p.parse_args()\n\n # host = \"ftp://mirrors.arsc.edu/AMISR/PKR/DASC/RAW/\"\n download(p.startend, p.site, p.odir, p.host, p.wavelen)", "def Save_Fastas2(UniprotIDs):\r\n file=open(\"../Data/Negative_cases/negative_cases.fasta\",\"w\")\r\n for ID in UniprotIDs:\r\n data=urllib.request.urlopen(\"http://www.uniprot.org/uniprot/%s.fasta\" %ID)\r\n f=data.readlines()\r\n for lines in f:\r\n file.write(str(lines))\r\n #help(data)\r\n file.close()", "def download_participants_document(cupASSistName):\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()), urllib2.HTTPRedirectHandler())\n opener.open(\"http://www.cupassist.com/pamelding/redirect.php?tknavn=\" + cupASSistName)\n return opener.open(\"http://www.cupassist.com/pamelding/vis_paamelding.php\").read()", "def uniprot(gene, organism, output_file):\n\n print(\"\\tUniprot ...\")\n\n # Request\n domain = \"https://www.uniprot.org/uniprot\"\n query = f\"?query=gene_exact%3A{gene}+organism%3A{organism}\"\n extend = \"columns=id,protein_names&format=tab\"\n r = requests.get(f\"{domain}/{query}&{extend}\")\n result = r.text.splitlines()\n\n # Extract Uniprot IDs and Offical Protein Names\n uniprot_id = []\n uniprot_name = []\n if result != []:\n del(result[0]) # Remove the header\n for line in result: # Extracting IDs and names\n colonne = line.split('\\t')\n id = colonne[0]\n name = colonne[1]\n uniprot_id.append(id)\n if colonne[1] not in uniprot_name:\n uniprot_name.append(name)\n\n # Write the Uniprot IDs\n output_file.write(\"<td><div class='scroll'>\")\n for id in uniprot_id:\n output_file.write(f'<a href=\"{domain}/{id}\">{id}</a><br>')\n output_file.write(\"</div></td>\")\n\n # Write the Uniprot Offical Names\n output_file.write(\"<td><div class='scroll'>\")\n output_file.write(f\"{'<br>'.join(uniprot_name)}</div></td>\")\n return uniprot_id\n else:\n output_file.write(\"<td><i>No data found</i></td>\"*2)\n return uniprot_id", "def download_torrent(self):\n try:\n if self.back_to_menu is True:\n return\n if self.found_torrents is False:\n print('Nothing found.')\n return\n elif self.mode_search == 'list':\n if self.selected is not None:\n # t_p, pirate and 1337x got magnet inside, else direct.\n if self.page in ['the_pirate_bay',\n 'torrent_project',\n '1337x',\n 'isohunt']:\n url = self.hrefs[int(self.selected)]\n self.get_magnet(url)\n print('Downloading movie: '+self.movieName+' from url: '+url)\n else:\n print('Bad selected page.')\n else:\n print('Nothing selected.')\n sys.exit(1)\n except Exception:\n print(traceback.format_exc())\n sys.exit(0)", "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def download_assignment_prof(request, pk):\n assignment = Assignment.objects.\\\n filter(pk=pk, assignmentype__prof__user=request.user).first()\n if assignment:\n filename = 'assign_%s.%s' % (assignment.student.user.username,\n assignment.document.name.split('.')[-1])\n response = HttpResponse(assignment.document,\n content_type='application/force_download')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def generate_fasta(seq_file, 
out_dir):\n\n LOGGER.info(\"Generating fasta file\", seq_file)\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, \"missing_seqs.log\")\n logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO)\n\n cnx = RfamDB.connect()\n cursor = cnx.cursor(raw=True)\n\n # fetch clan specific family full_region data and sequence description\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"ORDER BY fr.rfam_acc\")\n\n cursor.execute(query)\n\n for region in cursor:\n\n # new family\n if str(region[RFAM_ACC]) != rfam_acc:\n # check if there's no open file\n if fp_out is not None:\n fp_out.close()\n\n # open new fasta file\n fp_out = gzip.open(\n os.path.join(out_dir, str(region[RFAM_ACC]) + \".fa.gz\"), 'w')\n\n rfam_acc = region[RFAM_ACC]\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(sequence)\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)", "def retrieve_genome_data(filepath):\n try:\n seqrecords = list(SeqIO.parse(filepath, \"genbank\"))\n except:\n seqrecords = []\n # filename = filepath.split(\"/\")[-1]\n if len(seqrecords) == 0:\n print(f\"There are no records in {filepath.name}.\")\n seqrecord = None\n elif len(seqrecords) > 1:\n print(f\"There are multiple records in {filepath.name}.\" )\n seqrecord = None\n else:\n seqrecord = seqrecords[0]\n return seqrecord", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def _download_scn_asf(params):\n pid = params[0]\n product_file_id = params[1]\n remote_url = params[2]\n db_info_obj = params[3]\n scn_lcl_dwnld_path = params[4]\n asf_user = params[5]\n asf_pass = params[6]\n success = False\n\n eodd_wget_downloader = eodatadown.eodatadownutils.EODDWGetDownload()\n start_date = datetime.datetime.now()\n try:\n success = eodd_wget_downloader.downloadFile(remote_url, scn_lcl_dwnld_path, username=asf_user,\n password=asf_pass, try_number=\"10\", time_out=\"60\")\n except Exception as e:\n logger.error(\"An error has occurred while downloading from ASF: '{}'\".format(e))\n end_date = datetime.datetime.now()\n\n if success and os.path.exists(scn_lcl_dwnld_path):\n logger.debug(\"Set up database connection and update record.\")\n db_engine = sqlalchemy.create_engine(db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == pid).one_or_none()\n if query_result is None:\n logger.error(\"Could not find the scene within local 
database: \" + product_file_id)\n else:\n query_result.Downloaded = True\n query_result.Download_Start_Date = start_date\n query_result.Download_End_Date = end_date\n query_result.Download_Path = scn_lcl_dwnld_path\n ses.commit()\n ses.close()\n logger.info(\"Finished download and updated database: {}\".format(scn_lcl_dwnld_path))\n else:\n logger.error(\"Download did not complete, re-run and it should try again: {}\".format(scn_lcl_dwnld_path))", "def fetch_file(index_file, filename):\n with open(index_file, 'r') as index, open(filename, 'w+') as download:\n print 'Fetching keys from ', KEYSERVER, ' to create ', filename\n fetched_file = ''\n index_length = len(index.readlines())\n index.seek(0) # because python is stupid\n counter = 0\n for key in index.readlines():\n print 'Fetching key ', counter, ' of ', index_length\n counter = counter + 1\n fetched_file = fetched_file + parse_key(key.rstrip('\\n'))\n print 'All keys have been downloaded'\n download.write(base64.b64decode(fetched_file))\n print 'File has been decoded and saved as ', filename", "def _query_ncbi(self):\n import requests\n\n response = requests.get(\n f\"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?\"\n f\"db=protein&id={self.ncbi_id}&rettype=fasta&retmode=text\"\n )\n if response.status_code != 200:\n raise ValueError(f\"Failed to fetch sequence for NCBI ID {self.ncbi_id}\")\n\n self._sequence = \"\".join(response.text.split(\"\\n\")[1:])\n self.metadata[\"sequence_source\"] = \"NCBI\"", "def get_download_link(acc, field=\"fastq_ftp\"):\n links = list()\n # https://www.ebi.ac.uk/ena/submit/read-data-format\n if acc[2] == \"A\":\n keyword = \"submission_accession\"\n elif acc[2] == \"S\":\n keyword = \"sample_accession\"\n elif acc[2] == \"P\":\n keyword = \"study_accession\"\n elif acc[2] == \"X\":\n keyword = \"experiment_accession\"\n elif acc[2] == \"R\":\n keyword = \"run_accession\"\n else:\n print(\"Unsupported accession type %s\" % acc)\n return []\n api_bus = 'https://www.ebi.ac.uk/ena/portal/api/search?result=read_run&query=\"%s=%s\"&fields=%s&format=json' % (keyword, acc, field)\n res_data = urlopen(api_bus)\n ret = res_data.read()\n if ret:\n res = json.loads(ret)\n for run in res:\n links.extend(run[field].split(\";\"))\n return links\n else:\n return []", "def getseq(genomefasta):\n genomedict = {}\n for i in SeqIO.parse(open(genomefasta), \"fasta\"):\n genomedict[i.id] = str(i.seq)\n return genomedict", "def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "async def get_file(self, link, name, md5, session):\n if os.path.exists(name) or md5 in opts.archived_md5:\n self.count += 1\n return\n\n async 
with session.get(link) as media:\n # Open file initially with .part suffix\n with open(f\"{name}.part\", \"wb\") as f:\n while True:\n chunk = await media.content.read(1024)\n if not chunk:\n break\n f.write(chunk)\n\n # Remove .part suffix once complete\n # After this point file won't get removed if script gets interrupted\n os.rename(f\"{name}.part\", name)\n\n if opts.archive:\n log_hash(md5)\n self.count += 1\n msg(f\"{self.fetch_progress()} {self.board}/{self.dir}/{name}\")", "def get_sequences_from_id3c(url, username, password, lineage, segment, output):\n r = requests.get(url, auth=(username,password), stream=True)\n r.raise_for_status()\n\n with open(output, 'w+') as fasta_file:\n for line in r.iter_lines():\n if line:\n sequence = json.loads(line)\n strain = sequence['sample'][-8:] # this needs revision in ID3C to match format A/Washington/a2fb5c0f/2019\n fasta_file.write(\"\".join([\">\", strain, \"\\n\", sequence['seq'].lower(), \"\\n\"]))", "def download_from_pride(pxdataset, fasta, data_dir):\n if pxdataset.pxid in SPECIAL:\n download_special(pxdataset, data_dir)\n else:\n pxdataset.pxget(fasta, dest_dir=data_dir)\n os.path.join(data_dir, fasta)", "def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)", "def swissprot_fasta_downloader(database_directory: Path) -> Path:\n # Create protein folder\n merged_db_folder = database_directory / \"protein_db\"\n merged_db_folder.mkdir(parents=True, exist_ok=True)\n # Get release number\n rel_url = (\n f\"ftp://ftp.uniprot.org/pub/databases/uniprot/\"\n f\"current_release/knowledgebase/complete/reldate.txt\")\n release_file = urllib.request.urlopen(rel_url)\n release_number = release_file.readline().decode('utf-8').split()[3]\n # Write release information into file\n with open(merged_db_folder / \"uniprot_release.txt\", 'w') as relinfo:\n relinfo.write(\"Release number (date) {}\".format(release_number))\n # Download fasta files\n logger.info(\"Downloading SwissProt Fasta files\")\n # Overwrite existing temporal files\n output_fasta = merged_db_folder / \"uniprot_sprot.fasta.gz\"\n if output_fasta.is_file():\n output_fasta.unlink()\n # Download swissprot\n fasta_url = (\n f\"ftp://ftp.uniprot.org/pub/databases/uniprot/\"\n f\"current_release/knowledgebase/complete/uniprot_sprot.fasta.gz\")\n wget.download(fasta_url, out=str(output_fasta))\n decompressed_fasta = merged_db_folder / \"uniprot_sprot.fasta\"\n with gzip.open(output_fasta, 'rt') as compressed_fh, \\\n open(decompressed_fasta, 'w') as decompressed_fh:\n copyfileobj(compressed_fh, decompressed_fh)\n output_fasta.unlink()\n logger.info(\"Finished\")\n\n return output_fasta", "def download_files(self):", "def find_and_download_files(context):\n\n\n input_path = 'input/'\n if os.path.isdir(input_path):\n log.debug('Path already exists: ' + input_path)\n else:\n log.debug('Creating: ' + input_path)\n os.mkdir(input_path)\n\n fw = context.client\n\n if 'classification_measurement' in 
context.config:\n class_meas = context.config['classification_measurement'].split()\n else:\n class_meas = ['T1']\n\n # session and acquisition include/exclude lists can come from:\n # project info metadata,\n # subject info metadata, and\n # config options\n # The last one wins (how about getting it from an input file also, eh?)\n ses_exclude_list = None\n ses_include_list = None\n acq_exclude_list = None\n acq_include_list = None\n\n fs = 'freesurfer_longitudinal_'\n where = 'Found in project info'\n # check for exclude/include lists of regexs for sessions in project info\n sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['project'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in project info\n ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['project'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in subject info'\n # check for exclude/include lists of regexs for sessions in subject info\n sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in subject info\n ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in config'\n # set up exclude/include lists of reegexs for sessions in config\n if 'session_excludelist' in context.config:\n ses_exclude_list = context.config['session_excludelist'].split()\n log.info(where+' session_excludelist: \"'+str(ses_exclude_list)+'\"')\n if 'session_includelist' in context.config:\n ses_include_list = context.config['session_includelist'].split()\n log.info(where+' session_includelist: \"'+str(ses_include_list)+'\"')\n\n # set up exclude/include lists of reegexs for acquisitions in config\n if 'acquisition_excludelist' in context.config:\n acq_exclude_list = context.config['acquisition_excludelist'].split()\n log.info(where+' acquisition_excludelist: \"'+str(acq_exclude_list)+'\"')\n if 'acquisition_includelist' in context.config:\n acq_include_list = context.config['acquisition_includelist'].split()\n log.info(where+' acquisition_includelist: \"'+str(acq_include_list)+'\"')\n\n # go through all sessions, acquisitions to find files\n for session in context.gear_dict['subject'].sessions():\n\n lemme_out = False\n if ses_exclude_list:\n for regex in ses_exclude_list:\n if re.search(regex, session.label): # if excluded, skip\n log.info('Session \"' + session.label + '\" matches ' + \\\n 
'exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if ses_include_list:\n match = False\n for regex in ses_include_list:\n if not re.search(regex, session.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Session \"' + session.label + '\" matches ' \\\n 'an inclusion regex, keeping it')\n\n for acquisition in fw.get_session_acquisitions(session.id):\n\n lemme_out = False\n if acq_exclude_list:\n for regex in acq_exclude_list:\n if re.search(regex, acquisition.label): # if excluded, skip\n log.info('Acquisition \"' + acquisition.label + \\\n '\" matches exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if acq_include_list:\n match = False\n for regex in acq_include_list:\n if not re.search(regex, acquisition.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Acquisition \"' + acquisition.label + '\" ' + \\\n 'matches an inclusion regex, keeping it')\n\n for afile in acquisition.files:\n\n # Scan must be nifti\n if afile.type == 'nifti':\n\n found_one = False\n for cm in class_meas:\n if 'Measurement' in afile.classification:\n if cm in afile.classification['Measurement']:\n found_one = True\n log.info('Found ' + cm + ' file')\n\n if found_one:\n download_it(fw, acquisition, afile.name, input_path)\n context.gear_dict['visits'].append(\n make_file_name_safe(session.label, '_'))\n else:\n log.info('Ignoring ' + afile.name)", "def extract_cds(cds_dir, species, prot_id):\r\n # Get the filename that we want to query\r\n # for f in file_list:\r\n #if species in f:\r\n # break\r\n # Then, buld the command line\r\n cds_fname = os.path.join(cds_dir, species)\r\n cmd = ['samtools', 'faidx', cds_fname, prot_id]\r\n proc = subprocess.Popen(\r\n cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE\r\n )\r\n out, err = proc.communicate()\r\n # Then, process the output. We will remove the first line, since it is\r\n # just the sequence ID. 
We will then put all the nucleotides into a single\r\n # long string.\r\n out = out.decode('utf-8')\r\n lines = out.split('\\n')\r\n cds = ''.join(lines[1:])\r\n return cds", "def download(request, ef_id):\n ef = get_object_or_404(ExamFile, id=ef_id)\n path = os.path.join(settings.MEDIA_ROOT, ef.path.path)\n response= HttpResponse(content=file(path, 'rb').read(), \n mimetype='application/pdf')\n # fn = os.path.split(ef.path.path)[1]\n # response['Content-Disposition'] = \"attachment; filename=%s\" % (fn)\n return response", "def submit_online (fastafile : Path, outputfile: Path) :\r\n\r\n try:\r\n\r\n url1 = 'http://bioapp.iis.sinica.edu.tw/Nglyde/run.php'\r\n url2 = 'http://bioapp.iis.sinica.edu.tw/GlycoPred/MakeSummary.php'\r\n message = \"All queries have been done!!\"\r\n\r\n #launch job\r\n with open(fastafile, 'r') as f :\r\n seq = f.read()\r\n\r\n data = {'sequence': seq}\r\n r1 = requests.post(url1, data=data )\r\n\r\n\r\n #retrieve results\r\n temp = re.search(r\"job=.+\\)\", r1.text)\r\n jobid = temp.group().split('=')[1][:-1]\r\n\r\n sleep(2)\r\n r2 = requests.get(url2, params={'job' : jobid })\r\n\r\n count = 0; maxtime = 300\r\n while message not in r2.texta and count < maxtime/2 :\r\n sleep(2)\r\n r2 = requests.get(url2, params={'job': jobid })\r\n count += 1\r\n\r\n r2.raise_for_status()\r\n\r\n file = open(outputfile, \"w\")\r\n file.write(r2.text)\r\n\r\n except Exception as e:\r\n print(\"Failed: Online job submission failed !!!!\")\r\n if hasattr(e, 'message'): print(e.message)\r\n else: print(e)\r\n with open(outputfile, 'w', encoding='utf-8') as f:\r\n print(\"#Failed: Online job submission failed !!!! Error: \", e, file=f)\r\n pass", "def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')", "def _download_openfda_faers(self, resource, output) -> List[ManifestResource]:\n self._logger.info(\"OpenFDA available files download, URI '{}' --- START ---\".format(resource.uri))\n self._logger.info(\"Download OpenFDA FAERS repository metadata\")\n download = Downloads.download_staging_http(output.staging_dir, resource)\n repo_metadata = {}\n with open(download.path_destination, 'r') as f:\n repo_metadata = json.load(f)\n return self._download_selected_event_files(repo_metadata, output)", "def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)", "def elar_download(bundle_id, phpsessid, extension):\n\n # check for validity of ID\n try:\n soasID = bundle_id.split(\"oai:soas.ac.uk:\")[1]\n except IndexError: # bundle_id does not start with oai:soas.ac.uk:, so we are not interested\n print(\"not a SOAS file\", soasID)\n return\n # prepare request\n url = \"https://elar.soas.ac.uk/Record/%s\" % soasID\n cookies = {\"PHPSESSID\": phpsessid}\n print(\"checking\", url)\n # retrieve catalog page\n with requests.Session() as s:\n r = s.post(url, cookies=cookies)\n html = r.text\n # extract links to ELAN files\n try:\n links = fromstring(html).findall(\".//tbody/tr/td/a\")\n locations = {\n a.attrib[\"href\"] for a in links if a.attrib[\"href\"].endswith(extension)\n }\n except AttributeError: # not an ELAN file\n print(\"files are not accessible\")\n return\n # dowload identified files\n if 
not locations:\n        print(\"files are not accessible\")\n        return\n    for location in locations:\n        download_url = location\n        bs = location.split(\"/\")[-1].split('-b-')\n        if len(bs) == 1:\n            collectionname = 'no_collection'\n            basename = bs[0]\n        else:\n            collectionname = bs[0]\n            basename = '-b-'.join(bs[1:])\n        filepath = os.path.join('elar', collectionname, basename)\n        if len(filepath) > 150:\n            filepath = os.path.join('elar', collectionname, \"%s.%s\" % (hash(basename[:-4]),extension))\n        print(\"  downloading %s as %s:\" % (location, filepath))\n        os.makedirs(os.path.dirname(filepath), exist_ok=True)\n        save_file(s, filepath, download_url, cookies)", "def _download_metafile(dataset, path=None):\n    if not path:\n        path = sunpy.config.get('downloads', 'sample_dir')\n    base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n    fname = dataset.lower() + '_00000000_v01.cdf'\n    url = base_url + fname\n    try:\n        downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n    except ModuleNotFoundError:\n        downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n    return downloaded_file", "def read_genbank(genome_accession_no, genbank_file=None):\n    \n    if genbank_file:\n        print \"reading genbank file %s\" % genbank_file\n        seq_record = SeqIO.read(genbank_file, \"genbank\")\n    else:\n        print \"downloading and parsing genbank file for %s\" % genome_accession_no\n        handle = Entrez.efetch(db=\"nucleotide\", rettype=\"gb\",\n                               retmode=\"text\", id=genome_accession_no)\n        seq_record = SeqIO.read(handle, \"gb\")\n        handle.close()\n    return seq_record", "def retrieve_data(\n    ids, display, download=None, file=None, offset=None, length=None,\n    subseq_range=None, expanded=False, header=False\n):\n    url = build_retrieve_url(\n        ids=ids,\n        display=display,\n        result=None,\n        download=download,\n        file=file,\n        offset=offset,\n        length=length,\n        subseq_range=subseq_range,\n        expanded=expanded,\n        header=header)\n    return request_url(url, display, file)", "def download(progid, date):\n    logger = log.getLogger('obslog.download')\n\n    if not re.match(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}$', progid):\n        logger.error('This does not look like a program ID: %s', progid)\n        raise SystemExit\n\n    obslog = date + '_' + progid + '_obslog.txt'\n    url = 'https://archive.gemini.edu/file/' + obslog\n    logger.debug('URL: %s', url)\n    logger.info('Downloading %s...', obslog)\n    urllib.urlretrieve(url, obslog)\n    return", "def use_http(accession):\n    import re\n    import requests\n    accession, version = accession.split('.')\n    db, digits = accession.split(\"_\")\n    digits_split = '/'.join(re.findall('.{1,3}', digits))\n    url = f'https://ftp.ncbi.nlm.nih.gov/genomes/all/{db}/{digits_split}'\n    \n    r = requests.get(url)\n    current_accession = []\n    if r.status_code == 200: \n        # Success\n        links = re.findall(\"href=[\\\"\\'](.*?)[\\\"\\']\", r.text)\n        for link in links:\n            if link.startswith(accession):\n                t_db, t_version, t_extra = link.split(\"_\", 2)\n                current_accession.append(f\"{t_db}_{t_version}\")\n\n        if len(current_accession) == 1:\n            return [current_accession[0], False, None, None]\n        else:\n            if not len(current_accession):\n                return [current_accession, False, True, \"Unable to parse an accession\"]\n            else:\n                return [sorted(current_accession, reverse=True)[0], False, None, None]\n        \n    else:\n        return [accession, True, False, f\"Accession does not exist at {url}, status code {r.status_code}\"]", "def get_gtfs(agency, fetch):\n    if not 
fetch.get('file_url'):\n print \"Feed reference incomplete!:\", fetch\n return\n makedirs(agency)\n filename = os.path.join(agency, fetch['filename'])\n if os.path.exists(filename) and os.stat(filename).st_size == fetch['size']:\n print \"Existing, skipping:\", fetch['file_url']\n else:\n print \"Downloading:\", fetch['file_url']\n urllib.urlretrieve(fetch['file_url'], filename)\n print \"Done\"", "def download_special(pxdataset, data_dir):\n # PXD004074 (Tsr1) --------------------------------------------------------\n if pxdataset.pxid == \"PXD004074\":\n tsr1_filename = \"Rappsilber_Cook_CLMS_Tsr1_fasta.zip\"\n tsr1_zip = os.path.join(data_dir, tsr1_filename)\n pxdataset.pxget(tsr1_filename, data_dir)\n\n with zipfile.ZipFile(tsr1_zip, \"r\") as fname:\n fname.extractall(data_dir)\n\n # PXD010222 (PPARg_LBD) ---------------------------------------------------\n if pxdataset.pxid == \"PXD010222\":\n ppar_seq = [\n \">wef|PV4545|PPARg-LBD_human GST-tagged PPARgamma LBD\",\n \"MAPILGYWKIKGLVQPTRLLLEYLEEKYEEHLYERDEGDKWRNKKFELGLEFPNLPYYIDGD\",\n \"VKLTQSMAIIRYIADKHNMLGGCPKERAEISMLEGAVDIRYGVSRIAYSKDFETLKVDFLSK\",\n \"LPEMLKMFEDRLCHKTYLNGDHVTHPDFMLYDALDVVLYMDPMCLDAFPKLVCFKKRIEAIP\",\n \"QIDKYLKSSKYIALWPLQGWQATFGGGDHPPKSDLVPRHNQTSLYKKAGTMQLNPESADLRA\",\n \"LAKHLYDSYIKSFPLTKAKARAILTGKTTDKSPFVIYDMNSLMMGEDKIKFKHITPLQEQSK\",\n \"EVAIRIFQGCQFRSVEAVQEITEYAKSIPGFVNLDLNDQVTLLKYGVHEIIYTMLASLMNKD\",\n \"GVLISEGQGFMTREFLKSLRKPFGDFMEPKFEFAVKFNALELDDSDLAIFIAVIILSGDRPG\",\n \"LLNVKPIEDIQDNLLQALELQLKLNHPESSQLFAKLLQKMTDLRQIVTEHVQLLQVIKKTET\",\n \"DMSLHPLLQEIYKDL\"\n ]\n\n ppar_path = os.path.join(data_dir, \"pparg.fasta\")\n with open(ppar_path, \"w\") as fasta:\n fasta.writelines([l + \"\\n\" for l in ppar_seq])", "def export_fasta(self, metadata, analysistype, reportpath, cutoff, program):\n logging.info('Creating FASTA-formatted files of outputs')\n for sample in metadata:\n # Set the name of the FASTA output file\n sample[analysistype].fasta_output = os.path.join(reportpath, '{sn}_{prog}.fasta'.format(sn=sample.name,\n prog=analysistype))\n # Remove the file if it exists. Otherwise, if the samples are processed by the pipeline more than\n # once, the same results will be appended to the file\n try:\n os.remove(sample[analysistype].fasta_output)\n except FileNotFoundError:\n pass\n # Process the sample only if the script could find targets\n if sample[analysistype].blastresults != 'NA' and sample[analysistype].blastresults:\n # Open the FASTA output file in append mode\n with open(sample[analysistype].fasta_output, 'a+') as fasta_output:\n for target in sorted(sample[analysistype].targetnames):\n index = 0\n for hit in sample[analysistype].blastlist:\n if hit['subject_id'] == target:\n # Set the name and percent id to avoid writing out the dictionary[key] multiple times\n if float(hit['percent_match']) >= cutoff:\n # If the 'align' option was not specified, the .dnaseq attribute will be an empty\n # dictionary. 
Populate this attribute as required\n try:\n # The .dnaseq attribute will not exist for amino-acid based searches\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n # The .targetsequence attribute will be sufficient\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except (KeyError, IndexError):\n # Align the protein (and nucleotide) sequences to the reference\n sample = self.alignprotein(sample=sample,\n analysistype=analysistype,\n target=target,\n program=program,\n index=index,\n hit=hit)\n try:\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except IndexError:\n fasta = str()\n # Create the SeqRecord of the FASTA sequence\n if fasta:\n try:\n record = SeqRecord(fasta,\n id='{name}_{target}'\n .format(name=sample.name,\n target=target),\n description='')\n # Write the FASTA-formatted record to file\n fasta_output.write(record.format('fasta'))\n except (AttributeError, TypeError):\n pass\n index += 1\n # Return the updated metadata object\n return metadata", "def download_taxonomic_file(self, overwrite=False): # pragma: no cover\n import ftplib\n from sequana import sequana_config_path\n\n if os.path.exists(self.database) and overwrite is False:\n logger.info(\"Found taxonomy.dat file in sequana your path {}\".format(sequana_config_path))\n return\n else:\n logger.info(\"Downloading and extracting the taxonomy file from the web. Please be patient.\")\n\n if self.source == \"ena\":\n url = \"ftp.ebi.ac.uk\"\n else:\n url = \"ftp.ncbi.nlm.nih.gov\"\n\n self.ftp = ftplib.FTP(url)\n self.ftp.login()\n if self.source == \"ena\":\n # for the EBI ftp only: self.ftp.cwd('databases')\n self.ftp.cwd(\"pub\")\n self.ftp.cwd(\"databases\")\n self.ftp.cwd(\"taxonomy\")\n logger.warning(\n \"Downloading and saving in %s. 
This is from ebi and may be behind the NCBI taxonomy\" % self.database\n                )\n                self.ftp.retrbinary(\"RETR taxonomy.dat\", open(self.database, \"wb\").write)\n                self.ftp.close()\n            else:\n                self.ftp.cwd(\"pub\")\n                self.ftp.cwd(\"taxonomy\")\n                logger.warning(\"Downloading and saving in %s from ncbi ftp\" % self.database)\n                import tempfile\n                import shutil\n\n                with tempfile.TemporaryDirectory() as tmpdir:\n                    filename = tmpdir + os.sep + \"taxdump.tar.gz\"\n                    self.ftp.retrbinary(\"RETR taxdump.tar.gz\", open(filename, \"wb\").write)\n                    import tarfile\n\n                    tf = tarfile.open(filename)\n                    assert \"nodes.dmp\" in tf.getnames()\n                    assert \"names.dmp\" in tf.getnames()\n                    tf.extract(\"nodes.dmp\", tmpdir)\n                    tf.extract(\"names.dmp\", tmpdir)\n                    ncbi = NCBITaxonomy(tmpdir + os.sep + \"names.dmp\", tmpdir + os.sep + \"nodes.dmp\")\n                    ncbi.create_taxonomy_file(tmpdir + os.sep + \"taxonomy.dat\")\n                    shutil.move(tmpdir + os.sep + \"taxonomy.dat\", self.database)\n                self.ftp.close()", "def download_structure(inputpdbid):\n    try:\n        if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein':\n            logger.error(f'invalid PDB-ID (wrong format): {inputpdbid}')\n            sys.exit(1)\n        pdbfile, pdbid = fetch_pdb(inputpdbid.lower())\n        pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid))\n        create_folder_if_not_exists(config.BASEPATH)\n        with open(pdbpath, 'w') as g:\n            g.write(pdbfile)\n        return pdbpath, pdbid\n    except ValueError:  # Invalid PDB ID, cannot fetch from RCBS server\n        logger.error(f'PDB-ID does not exist: {inputpdbid}')\n        sys.exit(1)", "def download_file(download_url, save_path):\n    url = \"https://www.encodeproject.org/\" + download_url\n    urllib.request.urlretrieve(url, save_path)", "def boldExtract(genera):\r\n    # Prepare Web Service Endpoint for BOLD's Public Data Portal API\r\n    # Appending BOLD's base URL to each genera from the NSR list\r\n    base_url = 'http://v4.boldsystems.org/index.php/API_Public/combined?taxon='\r\n    source_urls = list(map(lambda x: \"{}{}{}\".\r\n                           format(base_url, x, '&format=tsv'), genera))\r\n\r\n    # Download sequence data from BOLD using list of url's\r\n    print('Beginning sequence data retrieval...')\r\n    counter = 0\r\n    for url in source_urls:\r\n        r = http.request('GET', url)\r\n        name = genera[counter]\r\n        counter += 1\r\n        with open(args.outdir1+\"/\"+name+\".tsv\", \"wb\") as fcont:\r\n            fcont.write(r.data)", "def fix_seqname(sname):\r\n    # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n    # protid = sname.split(' ')\r\n    # TK 2020-07-22\r\n    # Dictionary for filenames so that we know which CDS file to query for each\r\n    # protein ID.\r\n    lookups = {\r\n    'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 
'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def cafa4_mapping() -> pd.DataFrame:\n # List of the paths considered in the function\n paths = [\n \"cafa4.tar.gz\",\n \"CAFA4-export/TargetFiles/sp_species.9606.tfa\"\n ]\n if not any(os.path.exists(path) for path in paths):\n # Downloading the url to the given path\n download(\n url=\"https://www.biofunctionprediction.org/cafa-targets/CAFA4-export.tgz\",\n path=paths[0]\n )\n # Extracting the acquire\n shutil.unpack_archive(paths[0], \".\")\n # Delete the archived file\n os.remove(paths[0])\n # Parse the file and retrieve the IDs from the fasta file\n f = open(paths[1], \"r\")\n df = pd.DataFrame(\n (\n line[1:-1].split(\" \")\n for line in f.readlines()\n if line.startswith(\">\")\n ),\n columns=[\n \"cafa4_id\",\n \"uniprot_id\"\n ]\n )\n f.close()\n # Return the obtained IDs\n return df", "def download_file(client, file_id):\n\n file_content = client.file(file_id).content()\n print(file_content)", "def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)", "def download(self, download) -> None:\n path_cifarh = path.join(self.root, self.filename_cifarh)\n path_cifar = path.join(self.root, self.filename_cifar)\n is_there = path.isfile(path_cifarh) and path.isfile(path_cifar)\n if is_there:\n print(\"Files already exist.\")\n if download == \"force\" or not is_there:\n download_and_extract_archive(\n self.url_cifar, self.root, filename=self.filename_cifar\n )\n download_and_extract_archive(\n self.url_cifarh, self.root, filename=self.filename_cifarh\n )", "def start_torrent_download(filename):\n return tadapt.start_download(filename)", "def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not 
os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)", "def fetch(data_dir, dest=\"aida\"):\n\n # Get CoNLL03\n conll_dir = conll03.fetch(data_dir)\n\n # Create folder\n aida_dir = os.path.join(data_dir, dest)\n utils.create_folder(aida_dir)\n\n # Download AIDA\n aida_file = os.path.join(aida_dir, AIDA_FILE)\n if not os.path.exists(aida_file):\n utils.urlretrieve(AIDA_URL, aida_file)\n\n # Extract annotations\n final_dir = os.path.join(aida_dir, AIDA_NAME)\n if not os.path.exists(final_dir):\n with zipfile.ZipFile(aida_file, \"r\") as aida:\n aida.extractall(aida_dir)\n\n # Run AIDA script\n final_file = os.path.join(final_dir, AIDA_FINAL_FILE)\n if not os.path.exists(final_file):\n os.chdir(final_dir)\n subprocess.call(AIDA_SCRIPT.format(conll_dir), shell=True)\n\n return final_dir", "def download_assignment_student(request, pk, i):\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, evaluator=request.user).first()\n if evalassignment:\n eval_name = '%s_%s' % (evalassignment.assignment.assignmentype.title.\n replace(\" \", \"\"), i)\n filename = 'assign_%s.%s' % (eval_name, evalassignment.assignment.\n document.name.split('.')[-1])\n response = HttpResponse(evalassignment.assignment.document,\n content_type='application/force_download')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n else:\n return redirect('gradapp:dashboard_student')", "def download(isamAppliance, filename, id=None, comment=None, check_mode=False, force=False):\n ids = []\n download_flag = False\n if (isinstance(id, list)):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n download_flag = True\n ids.append(i)\n elif (_check(isamAppliance, id=id) is True):\n download_flag = True\n ids.append(id)\n elif (comment is not None):\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n download_flag = True\n ids = ret_obj['data']\n logger.info(\"Downloading the following list of IDs: {}\".format(ids))\n\n if force is True or (\n os.path.exists(filename) is False and download_flag is True): # Don't overwrite if not forced to\n if check_mode is False: # We are in check_mode but would try to download named ids\n # Download all ids known so far\n return isamAppliance.invoke_get_file(\"Downloading multiple snapshots\",\n \"/snapshots/download?record_ids=\" + \",\".join(ids), filename)\n\n return isamAppliance.create_return_object()", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as 
download_file:\n                    download_file.write(data)\n\n                download_links.append(resource[\"download_link\"])\n\n            \n            # Get the files\n            #Extract all files from the tar archives if necessary\n            if tarfile.is_tarfile(file_path):\n                tf = tarfile.open(file_path)\n                tf.extractall()", "async def download_file(\n    location_id: LocationID,\n    file_id: StorageFileID,\n    user_id: UserID,\n    link_type: LinkType = LinkType.PRESIGNED,\n):", "def main():\n\n    # Get the files\n    all_fna_file_path = []\n    path_to_all_info = '/Users/gustavotamasco/Google Drive/Shared drives/Projeto MDR KRP/Dados_Sequenciamento/'\n    dirpath=os.getcwd()\n    os.chdir(path_to_all_info)\n    directories = list_directories(path_to_all_info)\n\n    '''Genomes'''\n    genomes_path = \"{}{}\".format(path_to_all_info,directories[0])\n    os.chdir(genomes_path)\n    genome_dir = list_directories(genomes_path)\n    for organism in genome_dir:\n        fna_files = list_files(all_fna_file_path,genomes_path,organism)\n        print_status(fna_files)\n\n    '''Building a dir of fna files'''\n    genomes_fna_path = \"{}genomes_parsnp\".format(dirpath)\n    create_genomes_dir(genomes_fna_path)\n    os.chdir(genomes_fna_path)\n    for file in fna_files:\n        move_file(file, genomes_fna_path)\n\n    '''Adding extra organism from a different source'''\n    klebs = \"/Users/gustavotamasco/mdrkrp/klebs\"\n    k_files = list_files_new_source(klebs)\n    for k_file in k_files:\n        if \".fna\" in k_file:\n            final_k_file = \"{}/{}\".format(klebs,k_file)\n            move_file(final_k_file, genomes_fna_path)\n\n\n    '''Run parsnp'''\n    run_parsnp(dirpath, genomes_fna_path)", "def download_mission(self):\n    cmds = self.vehicle.commands\n    cmds.download()\n    # Wait until download is complete.\n    cmds.wait_valid()", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def onto(disgenet, edam):\n    disgenet = disgenet.replace(' ', '+').replace(\"'\", \"%27\")\n    edam = edam.replace(' ', '+').replace(\"'\", \"%27\")\n    disid = subprocess.Popen(\n        [\"curl -s -k http://127.0.0.1:3030/ds/query -X POST --data \" +\n         \"'query=PREFIX+rdf%3A+%3Chttp%3A%2F%2Fwww.w3.org%2F1999%2F02%2F22-rdf-syntax-ns%23%3E%0A\" +\n         \"PREFIX+dcterms%3A+%3Chttp%3A%2F%2Fpurl.org%2Fdc%2Fterms%2F%3E%0A\" +\n         \"PREFIX+ncit%3A+%3Chttp%3A%2F%2Fncicb.nci.nih.gov%2Fxml%2Fowl%2FEVS%2FThesaurus.owl%23%3E%0A\" +\n         \"SELECT+DISTINCT+%0A%09%3Fdisease+%0AFROM+%3Chttp%3A%2F%2Frdf.disgenet.org%3E+%0AWHERE+%7B%0A++\" +\n         \"SERVICE+%3Chttp%3A%2F%2Frdf.disgenet.org%2Fsparql%2F%3E+%7B%0A++++\" +\n         \"%3Fdisease+rdf%3Atype+ncit%3AC7057+%3B%0A++++%09dcterms%3Atitle+%22\" + disgenet +\n         \"%22%40en+.%0A%7D%0A%7D' -H 'Accept: application/sparql-results+json,*/*;q=0.9'\"],\n        stdout=subprocess.PIPE, shell=True).communicate()[0]\n    edam_id = subprocess.Popen([\"curl -s 'http://www.ebi.ac.uk/ols/api/search?q=\" + edam + \"&ontology=edam' 'Accept: application/json'\"],\n                               stdout=subprocess.PIPE, shell=True).communicate()[0]\n    try:\n        jdisease = json.loads(disid)\n        umllist = []\n        umls = jdisease['results']['bindings'][0]['disease']['value']\n    except (IndexError, ValueError):\n        umls = \"No disgenet record\"\n    try:\n        jedam = json.loads(edam_id)\n        eid = jedam['response']['docs'][0]['iri']\n    except (IndexError, ValueError):\n        eid = \"No EDAM record\"\n    return umls, eid", "def getSequence(ref, fasta):\n\n    fasta_header = \"\"\n\n    fh_fasta = open(fasta, \"r\")\n    entry = (x[1] for x in groupby(fh_fasta, lambda line: line[0] == \">\"))\n\n    for header in entry:\n        headerStr = header.__next__()[1:].strip()\n\n        seq = \"\".join(s.strip() for s in entry.__next__())\n\n        if ref == headerStr.replace('>',''):\n            filename = 
os.path.join(os.getcwd(), ref.replace('/','_').split('|')[0])\n fasta_header = replace_char(headerStr)\n\n with open(filename + '.fa', \"w\") as output_file:\n output_file.write(\">\" + fasta_header + \"\\\\n\" + seq.upper() + \"\\\\n\")\n\n fh_fasta.close()\n return fasta_header", "def get_file_by_url(url, params=None, **kwargs):\n\n try:\n req = requests.get(url=url, params=params, **kwargs)\n except requests.exceptions.RequestException:\n print(\"Error retrieving data from {}\".format(url))\n return None\n\n req.encoding = req.apparent_encoding\n res_text = \"\\n\".join([domain_to_idna(line) for line in req.text.split(\"\\n\")])\n return res_text", "def blaster(protSeq, orgnID = \"Mus musculus\"):\n \n from Bio.Blast.NCBIWWW import qblast\n from Bio.Blast import NCBIXML\n from sys import exit\n \n print(\"\\nconnecting to BLAST server. this will take some time...\")\n i = 1\n while i < 4: # BLAST sometimes returns empty results. if so, try once more, it happens quite rarely and resending the query seems to fix it.\n print(\"attempt number \" + str(i))\n i += 1\n resX = qblast(\"blastp\",\"refseq_protein\", protSeq, entrez_query= orgnID + \"[organism]\")\n resO = NCBIXML.read(resX)\n if resO.descriptions != []: break \n if resO.descriptions == []: \n print(\"connection unsuccessful. The BLAST server is acting up. Try again later.\")\n exit(0)\n \n else: print(\"connection successful\")\n \n print(resO.descriptions[0])\n descO = resO.descriptions[0]\n if descO.e < 0.01: \n try:\n descID = descO.title.split(\"|\")[3] # not sure why I picked element 3 here\n except IndexError:\n descID = descO.title.split(\"|\")[1]\n \n if \".\" in descID: return descID.split(\".\")[0]\n else: return descID\n \n else: return \"-\"", "def download_many(archivos:[(\"url\",\"nombre\")], carpeta:str=PATH, *, ignore_error:bool=True, _gui:bool=False, **tqdm_karg):", "def fetch_fasta_from_genome(self, genome_refl):\n\n if not self.check_ref_type(genome_ref, ['KBaseGenomes.Genome']):\n raise ValueError(\"The given genome_ref {} is not a KBaseGenomes.Genome type!\")\n # test if genome references an assembly type\n # do get_objects2 without data. get list of refs\n ws = Workspace(self.ws_url)\n genome_obj_info = ws.get_objects2({\n 'objects': [{'ref': genome_ref}],\n 'no_data': 1\n })\n # get the list of genome refs from the returned info.\n # if there are no refs (or something funky with the return), this will be an empty list.\n # this WILL fail if data is an empty list. But it shouldn't be, and we know because\n # we have a real genome reference, or get_objects2 would fail.\n genome_obj_refs = genome_obj_info.get('data', [{}])[0].get('refs', [])\n\n # see which of those are of an appropriate type (ContigSet or Assembly), if any.\n assembly_ref = list()\n ref_params = [{'ref': genome_ref + \";\" + x} for x in genome_obj_refs]\n ref_info = ws.get_object_info3({'objects': ref_params})\n for idx, info in enumerate(ref_info.get('infos')):\n if \"KBaseGenomeAnnotations.Assembly\" in info[2] or \"KBaseGenomes.ContigSet\" in info[2]:\n assembly_ref.append(\";\".join(ref_info.get('paths')[idx]))\n\n if len(assembly_ref) == 1:\n return fetch_fasta_from_assembly(assembly_ref[0], self.ws_url, self.callback_url)\n else:\n raise ValueError(\"Multiple assemblies found associated with the given genome ref {}! \"\n \"Unable to continue.\")", "def download(self, outputfile: str, outputformat: str):\n pass", "def download(self):\n if not self.id:\n raise AttributeError(\n \"Document ID not provided. 
Assign it to AutomatedDocument object `id` attribute.\"\n )\n response = self._client.get(\"{}{}/\".format(self._path, self.id), stream=True)\n\n if not self.filepath:\n self.filepath = response.headers[\"Content-Disposition\"].split(\"=\")[-1]\n\n with open(self.filepath, mode=\"wb\") as f:\n for chunk in response.iter_content(chunk_size=1024 * 1024):\n f.write(chunk)\n\n return response", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def download_data(self) -> None: # coverage: ignore\n\n navaids = []\n c = requests.get(f\"{base_url}/earth_fix.dat\")\n\n for line in c.iter_lines():\n\n line = line.decode(encoding=\"ascii\", errors=\"ignore\").strip()\n\n # Skip empty lines or comments\n if len(line) < 3 or line[0] == \"#\":\n continue\n\n # Start with valid 2 digit latitude -45. or 52.\n if not ((line[0] == \"-\" and line[3] == \".\") or line[2] == \".\"):\n continue\n\n # Data line => Process fields of this record, separated by a comma\n # Example line:\n # 30.580372 -094.384169 FAREL\n fields = line.split()\n navaids.append(\n Navaid(\n fields[2],\n \"FIX\",\n float(fields[0]),\n float(fields[1]),\n None,\n None,\n None,\n None,\n )\n )\n\n c = requests.get(f\"{base_url}/earth_nav.dat\")\n\n for line in c.iter_lines():\n\n line = line.decode(encoding=\"ascii\", errors=\"ignore\").strip()\n\n # Skip empty lines or comments\n if len(line) == 0 or line[0] == \"#\":\n continue\n\n # Data line => Process fields of this record, separated by a comma\n # Example lines:\n # 2 58.61466599 125.42666626 451 522 30 0.0 A Aldan NDB\n # 3 31.26894444 -085.72630556 334 11120 40 -3.0 OZR CAIRNS VOR-DME\n # type lat lon elev freq ? 
var id desc\n # 0 1 2 3 4 5 6 7 8\n\n fields = line.split()\n\n # Valid line starts with integers\n if not fields[0].isdigit():\n continue # Next line\n\n # Get code for type of navaid\n itype = int(fields[0])\n\n # Type names\n wptypedict = {\n 2: \"NDB\",\n 3: \"VOR\",\n 4: \"ILS\",\n 5: \"LOC\",\n 6: \"GS\",\n 7: \"OM\",\n 8: \"MM\",\n 9: \"IM\",\n 12: \"DME\",\n 13: \"TACAN\",\n }\n\n # Type code never larger than 20\n if itype not in list(wptypedict.keys()):\n continue # Next line\n\n wptype = wptypedict[itype]\n\n # Select types to read\n if wptype not in [\"NDB\", \"VOR\", \"ILS\", \"GS\", \"DME\", \"TACAN\"]:\n continue # Next line\n\n # Find description\n try:\n idesc = line.index(fields[7]) + len(fields[7])\n description: Optional[str] = line[idesc:].strip().upper()\n except Exception:\n description = None\n\n navaids.append(\n Navaid(\n fields[7],\n wptype,\n float(fields[1]),\n float(fields[2]),\n float(fields[3][1:])\n if fields[3].startswith(\"0-\")\n else float(fields[3]),\n float(fields[4])\n if wptype == \"NDB\"\n else float(fields[4]) / 100,\n float(fields[6])\n if wptype in [\"VOR\", \"NDB\", \"ILS\", \"GS\"]\n else None,\n description,\n )\n )\n\n self._data = pd.DataFrame.from_records(\n navaids, columns=NavaidTuple._fields\n )\n\n self._data.to_pickle(self.cache_dir / \"traffic_navaid.pkl\")", "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def _download_file(self, file_id, file_name, path):\n request = self.service.files().get_media(fileId=file_id)\n fh = io.FileIO(path + file_name, 'wb')\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n print('Start download ' + file_name)\n while not done:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))", "def download_file(path, filename, destination):\n import os\n command = \"wget -q -O \"+destination+\"/\"+filename+\" ftp://nomads.ncdc.noaa.gov/\"+path+\"/\"+filename\n os.system(command)", "def download(self):\n\n # os.open *should* give a thread-safe way to exlusivly open files\n filepath = self.film\n try:\n # os.O_BINARY is only avilable and needed on windows\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_BINARY\n except:\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n try:\n fd = os.open(filepath, flags)\n except:\n return\n\n try:\n response = self.session.get(self.filmurl, stream=True)\n if response.status_code == 200:\n for chunk in response.iter_content(1024):\n os.write(fd, chunk)\n except:\n # Remove partial img file if request or stream fails\n os.close(fd)\n os.remove(filepath)", "def single_file_download(url: str, encoding: str = \"utf-8\") -> str:\n\n recipient = BytesIO() # the stream we will write into\n\n # print(\"Opening %r . . 
.\" % url)\n curl = pycurl.Curl()\n curl.setopt(curl.URL, url)\n curl.setopt(curl.WRITEDATA, recipient)\n curl.perform()\n curl.close()\n # print(\"Closed %r.\" % url)\n\n return recipient.getvalue().decode(encoding)", "def download(dbx, folder, subfolder, name):\r\n path = '/%s/%s/%s' % (\"Apps\", \"Contract Drafter\", \"2.amr\")\r\n while '//' in path:\r\n path = path.replace('//', '/')\r\n with stopwatch('download'):\r\n try:\r\n md, res = dbx.files_download(path)\r\n except dropbox.exceptions.HttpError as err:\r\n print('*** HTTP error', err)\r\n return None\r\n data = res.content\r\n print(data, 'bytes; md:', md)\r\n return data" ]
[ "0.72803736", "0.64713925", "0.622999", "0.6218201", "0.6060453", "0.59448034", "0.57914644", "0.5711981", "0.5588932", "0.5571663", "0.556189", "0.5560884", "0.55341786", "0.553164", "0.55225044", "0.5511088", "0.5500932", "0.54671043", "0.54671043", "0.54506993", "0.53673744", "0.53551334", "0.53490084", "0.5342134", "0.5337148", "0.5291302", "0.52912056", "0.52849984", "0.52830195", "0.5264628", "0.5263901", "0.5262005", "0.5253883", "0.52462715", "0.5231539", "0.52268326", "0.5224647", "0.52085036", "0.5201269", "0.51993454", "0.5196138", "0.5181871", "0.5181871", "0.5148215", "0.51401764", "0.5133217", "0.51204747", "0.5097628", "0.5085273", "0.5079551", "0.5072027", "0.5071884", "0.5059822", "0.50580496", "0.5056327", "0.50458705", "0.5035814", "0.50347894", "0.5027734", "0.5019357", "0.50187045", "0.50171745", "0.5005088", "0.49908718", "0.49889165", "0.49878037", "0.49838343", "0.4982253", "0.49796486", "0.4977437", "0.49750414", "0.49567753", "0.4950297", "0.49478218", "0.49472657", "0.49471238", "0.49449864", "0.49333066", "0.4925643", "0.49209908", "0.4918894", "0.49152234", "0.49104682", "0.4903415", "0.48951015", "0.48935613", "0.48903114", "0.48866323", "0.48793647", "0.4872527", "0.48683497", "0.48674542", "0.486703", "0.4855747", "0.48538113", "0.48448047", "0.48425844", "0.4842512", "0.484235", "0.4837412" ]
0.71304876
1
A function for generating reaction likelihoods for a given genome according to the Probabilistic Annotation algorithm as
def generate_reaction_probabilities(fasta_file, template_model_file, genome_id=None): if genome_id is None: # Use fasta_file name minus extension. worker uses only for file names and logging genome_id = '.'.join(fasta_file.split('.')[0:-1]) # Create a worker for running the algorithm. worker = ProbAnnotationWorker(genome_id) try: template_model = _load_template_file(template_model_file) # Run blast using the fasta file. blast_result_file = worker.runBlast(fasta_file) # Calculate roleset probabilities. rolestring_tuples = worker.rolesetProbabilitiesMarble(blast_result_file) # Calculate per-gene role probabilities. role_probs = worker.rolesetProbabilitiesToRoleProbabilities(rolestring_tuples) # Calculate whole cell role probabilities. total_role_probs = worker.totalRoleProbabilities(role_probs) # Calculate complex probabilities. complex_probs = worker.complexProbabilities(total_role_probs, complexesToRequiredRoles=_complex_to_roles_dict(template_model)) # Calculate reaction probabilities. rxn_probs = worker.reactionProbabilities(complex_probs, rxnsToComplexes=_reactions_to_complexes_dict(template_model)) # Store in dictionary for better serialization return ReactionProbabilities([{'reaction': r[0], 'probability': r[1], 'type': r[2], 'complexes': _deserialize_cplx(r[3], worker.config['separator']), 'gpr': r[4]} for r in rxn_probs]) finally: worker.cleanup() # worker creates lots of temporary and intermediate files. Allow it to clean up
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def log_prob(self):", "def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit", "def likelihood(seq):\n global qmap\n if qmap is None:\n qmap = {'!': 1.0, '\"': 0.7943282347242815, '#': 0.6309573444801932, '$': 0.5011872336272722, '%': 0.3981071705534972, '&': 0.31622776601683794, \"'\": 0.251188643150958, '(': 0.19952623149688797, ')': 0.15848931924611134, '*': 0.12589254117941673, '+': 0.1, ',': 0.07943282347242814, '-': 0.06309573444801933, '.': 0.05011872336272722, '/': 0.039810717055349734, '0': 0.03162277660168379, '1': 0.025118864315095794, '2': 0.0199526231496888, '3': 0.015848931924611134, '4': 0.012589254117941675, '5': 0.01, '6': 0.007943282347242814, '7': 0.00630957344480193, '8': 0.005011872336272725, '9': 0.003981071705534973, ':': 0.0031622776601683794, ';': 0.0025118864315095794, '<': 0.001995262314968879, '=': 0.001584893192461114, '>': 0.0012589254117941675, '?': 0.001, '@': 0.0007943282347242813, 'A': 0.000630957344480193, 'B': 0.0005011872336272725, 'C': 0.00039810717055349735, 'D': 0.00031622776601683794, 'E': 0.00025118864315095795, 'F': 0.00019952623149688788, 'G': 0.00015848931924611142, 'H': 0.00012589254117941674, 'I': 0.0001, 'J': 7.943282347242822e-05, 'K': 6.309573444801929e-05, 'L': 5.011872336272725e-05, 'M': 3.9810717055349695e-05, 'N': 3.1622776601683795e-05, 'O': 2.5118864315095822e-05, 'P': 1.9952623149688786e-05, 'Q': 1.584893192461114e-05, 'R': 1.2589254117941661e-05, 'S': 1e-05, 'T': 7.943282347242822e-06, 'U': 6.30957344480193e-06, 'V': 5.011872336272725e-06, 'W': 3.981071705534969e-06, 'X': 3.162277660168379e-06, 'Y': 2.5118864315095823e-06, 'Z': 1.9952623149688787e-06, '[': 1.584893192461114e-06, '\\\\': 1.2589254117941661e-06, ']': 1e-06, '^': 7.943282347242822e-07, '_': 6.30957344480193e-07, '`': 5.011872336272725e-07, 'a': 3.981071705534969e-07, 'b': 3.162277660168379e-07, 'c': 2.5118864315095823e-07, 'd': 1.9952623149688787e-07, 'e': 
1.584893192461114e-07, 'f': 1.2589254117941662e-07, 'g': 1e-07, 'h': 7.943282347242822e-08, 'i': 6.30957344480193e-08, 'j': 5.011872336272725e-08, 'k': 3.981071705534969e-08, 'l': 3.162277660168379e-08, 'm': 2.511886431509582e-08, 'n': 1.9952623149688786e-08, 'o': 1.5848931924611143e-08, 'p': 1.2589254117941661e-08, 'q': 1e-08, 'r': 7.943282347242822e-09, 's': 6.309573444801943e-09, 't': 5.011872336272715e-09, 'u': 3.981071705534969e-09, 'v': 3.1622776601683795e-09, 'w': 2.511886431509582e-09, 'x': 1.9952623149688828e-09, 'y': 1.584893192461111e-09, 'z': 1.2589254117941663e-09, '{': 1e-09, '|': 7.943282347242822e-10, '}': 6.309573444801942e-10, '~': 5.011872336272714e-10, '\\x7f': 3.9810717055349694e-10, '\\x80': 3.1622776601683795e-10, '\\x81': 2.511886431509582e-10, '\\x82': 1.9952623149688828e-10, '\\x83': 1.584893192461111e-10, '\\x84': 1.2589254117941662e-10, '\\x85': 1e-10}\n return [qmap[i] for i in seq]", "def likelihood_genotype(genotype, bases_all_reads, error_rates):\n likelihood = 1\n for observed_base in bases_all_reads:\n p = 0\n for base in \"ACGT-\":\n l = prob_t_N(genotype, base) * error_rates[base][observed_base]\n p += l\n likelihood *= p\n\n return likelihood", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def likelihood(self):\n \n raise NotImplementedError()", "def annotate_effect(cds_dict, genome, snp):\n # List to save the coding effect\n coding_effect = []\n \n # Change the SNP position from 1-indexed to 0-indexed\n snp = (snp[0]-1, snp[1])\n \n # Determine which genes the SNP is located in\n genes = []\n for k,v in cds_dict.items():\n if snp[0] in range(v.location.start, v.location.end): \n genes.append(k)\n # Check that SNP is in a gene\n if genes: \n # Some SNPs will be in more than one gene, SARS has overlaping ORFs\n for gene in genes: \n gene_tuple = list(zip(list(cds_dict[gene].location), cds_dict[gene].location.extract(genome)))\n # Get the indicies relative to the gene, add 1 to get 1-indexed values\n indicies = [x + 1 for x, y in enumerate(gene_tuple) if y[0] == snp[0]]\n # Determine codon position from gene index\n for i in indicies:\n # First position in codon\n if i % 3 == 1:\n codonpos = 1\n wtcodon = [gene_tuple[i-1], gene_tuple[i], gene_tuple[i+1]]\n # Second position in codon\n elif i % 3 == 2:\n codonpos = 2\n wtcodon = [gene_tuple[i-2], gene_tuple[i-1], gene_tuple[i]]\n # Third position in codon \n elif i % 3 == 0:\n codonpos = 3\n wtcodon = [gene_tuple[i-3], gene_tuple[i-2], gene_tuple[i-1]]\n \n # From the wt codon sequence, determine the alterative codon, coding change, and effect\n altcodon = [snp if i == (codonpos-1) else b for i, b in enumerate(wtcodon)]\n wtaa = translate(\"\".join(y for x,y in wtcodon))\n altaa = translate(\"\".join(y for x,y in altcodon))\n if wtaa == altaa:\n effect = \"synonymous\"\n elif wtaa != altaa and altaa == '*':\n effect = \"nonsense\"\n elif wtaa != altaa and altaa != '*':\n effect = \"missense\"\n # Save the codon effects and information\n coding_effect.append((codonpos, f\"{wtaa}{-(i // -3)}{altaa}\", effect, 
gene))\n # If the SNP isn't in a gene, it's intergeneic and has no coding effect\n else:\n coding_effect.append((\"NA\", \"NA\", \"NA\", \"intergeneic\"))\n \n \n # Deal with SNPs in multiple genes with multiple effects \n if len(coding_effect) == 1:\n return list(coding_effect[0])\n else: \n if len(set([(a,b,c) for a,b,c,d in coding_effect])) == 1: \n return list(list(set(coding_effect))[0])\n # TODO: Deal with ambiguous sequences\n else:\n return [\"NA\", \"NA\", \"NA\", \"ambiguous\"]", "def target_log_prob_fn(self, *args, **kwargs): # pylint: disable=unused-argument\n\n def log_joint_fn(*args, **kwargs): # pylint: disable=unused-argument\n states = dict(zip(self.unobserved.keys(), args))\n states.update(self.observed)\n interceptor = interceptors.CollectLogProb(states)\n with ed.interception(interceptor):\n self._f(self._cfg)\n\n log_prob = sum(interceptor.log_probs)\n return log_prob\n return log_joint_fn", "def score_sequence(seq, ngramlogprobs):\n return", "def get_log_likelihood(response_probability, response):\n pass", "def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))", "def Log_OB(xref,x):\n\n nX = np.shape(x)\n\n m = nX[0]\n n = nX[1]\n t = nX[2]\n\n G = np.zeros((m,n,t))\n\n for r in range(t):\n\n # Correct for permuations\n\n Xout,PiA= CorrectPerm(xref,x[:,:,r])\n\n G[:,:,r] = Xout - np.dot(xref,np.dot(PiA,Xout))\n\n return G", "def calc_prob(number_of_strings, GC_content, DNA):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in DNA:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n #P(at least 1 match of s) = 1 − P(no matches out of N strings) = 1 − [1 - P_no_match]^N\r\n\r\n P_no_match = (((1 - GC_content)/2) **AT) * ((GC_content/2) **GC)\r\n prob = 1 - (1-P_no_match) **number_of_strings\r\n\r\n print(\"%0.3f\" %prob)", "def log_likelihood(self, data, reward_model, bias_params):", "def biopythonMM(pwmFileName,genomeDict,mpbsDict,scoringMethod,tempLocation,pseudocounts=0.1,bitscore=12.0,fpr=0.01,precision=10**4,highCutoff=0.7,functionalDepth=0.9):\n \n # Reading PWM\n pwm = readPwmFile(pwmFileName,tempLocation,pseudocounts)\n pwmName = pwmFileName.split(\"/\")[-1].split(\".\")[0]\n pwmLen = len(pwm)\n\n # Evaluating threshold\n pwmThreshold = 0.0\n if(scoringMethod == \"bitscore\"):\n pwmThreshold = bitscore\n elif(scoringMethod == \"fpr\"):\n sd = Motif.ScoreDistribution(pwm,precision=precision)\n pwmThreshold = sd.threshold_fpr(fpr)\n elif(scoringMethod == \"boyle\"):\n maxScore = pwm.max_score()\n minScore = 0.0 # TODO Boyle's rule is not suited for negative values.\n pwmThreshold = min(highCutoff*maxScore,functionalDepth*(maxScore-minScore))\n else:\n sys.stderr.write(\"Choose a valid scoring method.\\n\")\n sys.exit(0)\n\n # Creating aditional parameters\n chrList = constants.getChromList(reference=[mpbsDict])\n tempMpbsDict = dict([(e,[]) for e in chrList])\n maxValue = -99.0\n\n # Iterating on chromosomes\n for chrName in chrList:\n\n # Reading genome\n sequence = genomeDict[chrName]\n\n # Performing biopython's motif matching\n for pos, score in pwm.search_pwm(sequence,threshold=pwmThreshold):\n if(score > maxValue): maxValue = score\n if(pos >= 0): tempMpbsDict[chrName].append([pos,pos+pwmLen,pwmName,score,\"+\"])\n else: tempMpbsDict[chrName].append([-pos,-pos+pwmLen,pwmName,score,\"-\"])\n\n # Update scores - new scores are within [0,1000]\n for chrName in chrList:\n for e in tempMpbsDict[chrName]:\n 
mpbsDict[chrName].append([e[0],e[1],e[2],int(1000*(e[3]-pwmThreshold)/(maxValue-pwmThreshold)),e[4]])\n \n return 0", "def gomeroccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n gomer_occupancy = 1\n area_pwm_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(pwm_length - 1, 1, -1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length):\n if j <= i:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n elif (j + i) > len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n # print \"got to else\"\n s = seq[j + i]\n prod_gomer *= pwm_dictionary[s][j]\n prod_gomer_rc *= area_pwm_rc[s][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n for i in range(len(seq) - 1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n prod_gomer *= pwm_dictionary[seq[j + i]][j]\n prod_gomer_rc *= area_pwm_rc[seq[j + i]][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n gomer_occupancy = 1 - gomer_occupancy\n\n return gomer_occupancy", "def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def viterbi(prob_matrix):\n TINY = 1e-6 # to avoid NaNs in logs\n\n # if prob_matrix is 1D, make it 2D\n if len(np.shape(prob_matrix)) == 1:\n prob_matrix = [prob_matrix]\n \n length = len(prob_matrix)\n\n probs = np.zeros_like(prob_matrix)\n backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1\n \n for i in [0,1,2,3,4]:\n probs[0][i] = np.log(prob_matrix[0][i]+TINY)\n \n # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single\n for t in range(1, length):\n # E, S -> B | B, M -> M | B, M -> E | E, S -> S\n previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]\n for i in range(5):\n prevs = previous_of[i]\n max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]\n backpt[t][i] = max_id\n probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]\n\n seq = np.ones(length, 'int32') * -1\n #print(probs[length-1])\n seq[length-1] = np.argmax(probs[length-1])\n #print(seq[length-1])\n max_prob = probs[length-1][seq[length-1]]\n for t in range(1, length):\n seq[length-1-t] = backpt[length-t][seq[length-t]]\n \n return seq", "def complex(self, sentence):\r\n repetition = 6000\r\n warmup = 2500\r\n pos_mcmc_dict = {\"pos_\" + str(i): {} for i in range(len(sentence))}\r\n sequence = [\"noun\"] * len(sentence)\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n prob_first = 
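A usage sketch for the viterbi function above. Note that previous_of defines transitions over five states, so the inline {B, M, E, S} comment appears stale: indices 1-4 behave as Begin/Middle/End/Single, and index 0 appears to be an extra pad/other state that only follows itself. Toy emission probabilities (made up) for a three-token input:

import numpy as np

probs = np.array([
    [0.05, 0.60, 0.10, 0.05, 0.20],   # token 0: most likely Begin
    [0.05, 0.10, 0.50, 0.30, 0.05],   # token 1: most likely Middle
    [0.05, 0.05, 0.10, 0.70, 0.10],   # token 2: most likely End
])
print(viterbi(probs))                 # decodes to [1 2 3], i.e. a B-M-E segmentation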
self.posterior_first(sentence[i])\r\n sample_first = list(\r\n np.random.choice(\r\n [keys for keys in prob_first.keys()],\r\n repetition,\r\n p=[\r\n float(prob_first[keys]) / sum(prob_first.values())\r\n for keys in prob_first.keys()\r\n ],\r\n )\r\n )\r\n sample_first = sample_first[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_first.count(pos)) / len(sample_first))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n elif i == 1:\r\n prob_second = self.post_second(sentence[i], sequence[i - 1])\r\n sample_second = list(\r\n np.random.choice(\r\n [keys for keys in prob_second.keys()],\r\n repetition,\r\n p=[\r\n float(prob_second[keys]) / sum(prob_second.values())\r\n for keys in prob_second.keys()\r\n ],\r\n )\r\n )\r\n sample_second = sample_second[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_second.count(pos)) / len(sample_second))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n else:\r\n prob_other = self.posterior_else(\r\n sentence[i], sequence[i - 1], sequence[i - 2]\r\n )\r\n sample_other = list(\r\n np.random.choice(\r\n [keys for keys in prob_other.keys()],\r\n repetition,\r\n p=[\r\n float(prob_other[keys]) / sum(prob_other.values())\r\n for keys in prob_other.keys()\r\n ],\r\n )\r\n )\r\n sample_other = sample_other[warmup:]\r\n pos_mcmc_dict[\"pos_\" + str(i)] = {\r\n pos: (float(sample_other.count(pos)) / len(sample_other))\r\n for pos in self.position_list\r\n }\r\n sequence[i] = max(\r\n pos_mcmc_dict[\"pos_\" + str(i)],\r\n key=pos_mcmc_dict[\"pos_\" + str(i)].get,\r\n )\r\n return sequence", "def make_fitness_function(cantus_firmus):\n def fitness_function(genome, debug=False):\n \"\"\"\n Given a candidate solution will return its fitness score assuming\n the cantus_firmus in this closure. 
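The sampling pattern used repeatedly in the complex method above (draw from a categorical posterior, discard the warm-up draws, keep the empirical mode) can be isolated. A minimal standalone sketch, with a made-up posterior dict; the function name and defaults mirror the repetition/warmup constants in the original:

import numpy as np

def mcmc_mode(posterior, repetition=6000, warmup=2500, seed=0):
    # Draw categorical samples proportional to the (unnormalized) posterior weights,
    # drop the warm-up draws, and return the most frequent remaining label.
    rng = np.random.default_rng(seed)
    labels = list(posterior)
    weights = np.array([posterior[k] for k in labels], dtype=float)
    draws = rng.choice(labels, size=repetition, p=weights / weights.sum())
    kept = draws[warmup:]
    values, counts = np.unique(kept, return_counts=True)
    return values[counts.argmax()]

# mcmc_mode({"noun": 3.0, "verb": 1.0}) -> 'noun' (with overwhelming probability)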
Caches the fitness score in the\n genome.\n \"\"\"\n cp = genome.chromosome\n\n # calculate some information for easier scoring\n ScoringInfo = namedtuple(\"ScoringInfo\", \"pitch duration measure beat vi hi_next hi_prev voice_dir\")\n melody_info = list()\n beat = 4\n measure = 0\n for i in range(1, len(cp)):\n hi_next = abs(cp[i][0] - cp[i + 1][0]) if i != len(cp) - 1 else -1 # next horizontal interval\n hi_prev = abs(cp[i][0] - cp[i - 1][0]) if i != 1 else -1 # previous horizontal interval\n vi = abs(cp[i][0] - cantus_firmus[measure]) # vertical interval\n # voice movement direction\n voice_dir = 0 if i == len(cp) - 1 or cp[i + 1][0] == cp[i][0] else copysign(1, cp[i + 1][0] - cp[i][0])\n melody_info.append(ScoringInfo(cp[i][0], cp[i][1], measure, beat, vi, hi_next, hi_prev, voice_dir))\n beat += cp[i][1]\n measure += beat / 8\n beat %= 8\n\n if debug:\n print \"MELODY INFO: \", melody_info\n\n hscores = list()\n vscores = list()\n # hscore 1: 8th notes must move in step\n amount_of_8th = 0\n amount_of_missteps = 0\n for note in melody_info:\n if note.duration == 1:\n amount_of_8th += 1\n if note.hi_next > 1:\n amount_of_missteps += 1\n if note.hi_prev > 1:\n amount_of_missteps += 1\n hscores.append(float(amount_of_missteps) / (amount_of_8th * 2))\n if debug:\n print \"HSCORE 1: 8TH - \", amount_of_8th, \", MISSTEPS - \", amount_of_missteps\n\n # hscore 2: one climax, that can be repeated only after neighboring tone\n # hscore 3: Climax should be on the strong beat\n highest_note = max([note.pitch for note in melody_info])\n climax_count = 0\n climax_on_weak_beat_count = 0\n for i, note in enumerate(melody_info):\n if note.pitch == highest_note:\n climax_count += 1\n if note.beat not in [0, 4]:\n climax_on_weak_beat_count += 1\n if i < len(melody_info) - 2 and note.pitch == melody_info[i + 2].pitch: # If next note is\n if note.hi_next == 1 and melody_info[i + 2].hi_prev == 1: # neighboring tone\n if note.vi in CONSONANCES and melody_info[i + 2].vi in CONSONANCES: # And surrounding notes are consonant\n climax_count -= 1 # we can allow 2nd climax\n if melody_info[i + 2].beat not in [0, 4]:\n climax_on_weak_beat_count -= 1 # And 2nd climax may be on weak beat\n\n hscores.append(float(climax_count - 1) / len(melody_info))\n hscores.append(float(climax_on_weak_beat_count) / climax_count)\n\n if debug:\n print \"HSCORE 2+3: CLIMAX CNT - \", climax_count, \", WEAK CLIMAX CNT - \", climax_on_weak_beat_count\n\n # hscore 4: Horizontal intervals are consonant\n unconsonant_amount = len(filter(lambda x: x.hi_next not in CONSONANCES + [1], melody_info[:-1]))\n hscores.append(float(unconsonant_amount) / (len(melody_info) - 1))\n\n if debug:\n print \"HSCORE 4: UNCONSANANT AMOUNT - \", unconsonant_amount\n\n # hscore 5: Stepwise movement should predominate\n leaps_count = len(filter(lambda x: x.hi_next != 1, melody_info[:-1]))\n sections = round(float(len(cantus_firmus)) / 16)\n if leaps_count < (2 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n elif leaps_count > (4 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n else:\n hscores.append(0.0)\n\n if debug:\n print \"HSCORE 5: LEAPS - \", leaps_count, \"SECTIONS - \", sections\n\n # hscore 6: After large leap - stepwise motion\n large_leaps_count = 0\n large_leaps_not_followed_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3:\n large_leaps_count += 1\n if melody_info[i + 1].hi_next != 1:\n large_leaps_not_followed_count += 1\n 
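The ScoringInfo records built at the top of the fitness function cache, per note, everything the later rule checks need. A minimal sketch of the same pattern, with made-up field values:

from collections import namedtuple

ScoringInfo = namedtuple("ScoringInfo",
                         "pitch duration measure beat vi hi_next hi_prev voice_dir")
# One cached note: pitch 7, a half note (duration 4) on the downbeat of measure 2,
# a third (vi=2) above the cantus firmus, stepwise on both sides, moving upward.
note = ScoringInfo(pitch=7, duration=4, measure=2, beat=0,
                   vi=2, hi_next=1, hi_prev=1, voice_dir=1)
print(note.vi in (2, 5, 9))   # True: counts toward the "thirds/sixths/tenths" rule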
hscores.append(float(large_leaps_not_followed_count) / large_leaps_count if large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 6: LL CNT - \", large_leaps_count, \"LL NOT FOLLOWED CNT - \", large_leaps_not_followed_count\n\n # hscore 7: change direction after each large leap\n large_leaps_not_changedir_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3 and note.voice_dir != -melody_info[i + 1].voice_dir:\n large_leaps_not_changedir_count += 1\n hscores.append(float(large_leaps_not_changedir_count) / large_leaps_count if large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 7: LL NOT CHNGDIR CNT - \", large_leaps_not_changedir_count\n\n # hscore 8: climax should be melodically consonant with tonic\n hscores.append(1.0 if highest_note - 4 in CONSONANCES else 0.0)\n\n # hscore 9: no more than 2 consecutive leaps\n conseq_leaps = 0\n punish_score = 0\n for note in melody_info:\n conseq_leaps += 1\n if note.hi_next in [0, 1]:\n conseq_leaps = 0\n if conseq_leaps > 3:\n punish_score += 1\n hscores.append(float(punish_score) / (len(melody_info) - 3))\n\n if debug:\n print \"HSCORE 9: CONSEQ LEAPS PUNISH SCORE - \", punish_score\n\n # hscore 10: no more than 2 large leaps per section\n if large_leaps_count > 2 * sections:\n hscores.append(float(large_leaps_count - 2 * sections) / (len(melody_info) - 1 - 2 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 11: not too long stepwise in same direction\n longest_stepwise_seq = 0\n current_stepwise_seq = 0\n prev_dir = 0\n num_changes = 0\n motion_vector = list()\n for note in melody_info:\n if note.hi_next <= 1:\n if note.voice_dir in [prev_dir, 0]:\n current_stepwise_seq += 1\n longest_stepwise_seq = max(longest_stepwise_seq, current_stepwise_seq)\n else:\n prev_dir = note.voice_dir\n current_stepwise_seq = 0\n num_changes += 1\n motion_vector.append(note.pitch)\n else:\n if note.voice_dir != prev_dir and note.voice_dir != 0:\n prev_dir = note.voice_dir\n num_changes += 1\n motion_vector.append(note.pitch)\n current_stepwise_seq = 0\n motion_vector.append(cp[-1][0])\n if longest_stepwise_seq < 5:\n longest_stepwise_seq = 0\n hscores.append(float(longest_stepwise_seq) / len(cp))\n\n if debug:\n print \"HSCORE 11: LONGEST STEPWISE SEQUENCE - \", longest_stepwise_seq\n\n # hscore 12: direction needs to change several times\n if num_changes < 3 * sections:\n hscores.append(1 - float(num_changes) / (3 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 13: ending note is tonic\n hscores.append(0)\n\n # hscore 14: penultimate note is leading tone\n hscores.append(0)\n\n # hscore 15: the start of a motion is consonant with the end of a motion\n unconsotant_count = 0\n big_leaps_count = 0\n for i in range(1, len(motion_vector) - 1):\n if abs(motion_vector[i] - motion_vector[i + 1]) not in CONSONANCES:\n unconsotant_count += 1\n if abs(motion_vector[i] - motion_vector[i + 1]) > 6:\n big_leaps_count += 1\n hscores.append(float(unconsotant_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 15: UNCONSONANT MOTIONS - \", unconsotant_count\n\n # hscore 16: Large motion intervals (>6 tones) should be avoided\n hscores.append(float(big_leaps_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 16: LARGE MOTIONS - \", big_leaps_count\n\n # hscore 17: No frequent repetition of the same note\n rep_count = 0\n for note in melody_info:\n if note.hi_next == 0:\n rep_count += 1\n if rep_count > 2 * sections:\n rep_count -= 2 * sections\n else:\n rep_count = 0\n hscores.append(float(rep_count) / 
(len(cp) - 2 * sections))\n\n if debug:\n print \"HSCORE 17: REPETITIONS COUNT - \", rep_count\n\n # hscore 18: no repetition of sequence within a 4 measure interval\n repeated = set()\n for i in range(len(melody_info) - 2):\n j = i + 1\n while melody_info[j].measure < melody_info[i].measure + 4 and j < len(melody_info) - 1:\n if melody_info[i].pitch == melody_info[j].pitch:\n k = 1\n while j + k < len(melody_info) and melody_info[j + k].pitch == melody_info[i + k].pitch:\n if k == 1:\n repeated.add(j)\n repeated.add(j + k)\n k += 1\n j += 1\n\n hscores.append(float(len(repeated)) / len(cp))\n\n if debug:\n print \"HSCORE 18: REPEATED POSITIONS - \", repeated\n\n # hscore 19: largest allowed interval is octave\n more_than_ocatave_amount = len(filter(lambda x: x.hi_next > 7, melody_info[:-1]))\n hscores.append(float(more_than_ocatave_amount) / len(cp))\n\n if debug:\n print \"HSCORE 19: MORE THAN OCTAVES - \", more_than_ocatave_amount\n\n # vscore 1: whole notes should be consonant (ensured by generation and hscore 13)\n vscores.append(0.0)\n\n # vscores 2 and 3: halves and quarters should be consonant on first beat.\n # or can be dissonant on other beats beat, if passing tone\n amount_of_notes = 0\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info):\n if note.duration >= 2:\n amount_of_notes += 1\n if note.beat == 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.beat != 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.hi_prev == 1 and note.hi_next == 1 and note.voice_dir == melody_info[i - 1].voice_dir:\n if melody_info[i - 1].vi in CONSONANCES and melody_info[i + 1].vi in CONSONANCES:\n amount_of_wrong_notes -= 1\n\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n\n if debug:\n print \"VSCORE 2+3: NOTES > THAN 8TH - \", amount_of_notes, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 4: one of eight notes from pair should be consonant\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.duration == 1 and melody_info[i + 1].duration == 1:\n if note.vi not in CONSONANCES and melody_info[i + 1].vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n beat += cp[i][1]\n vscores.append(float(amount_of_wrong_notes) / amount_of_8th)\n\n if debug:\n print \"VSCORE 4: 8TH NOTES - \", amount_of_8th, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 5: unisons ok if on 1st beat through suspension or if tied over (ensured by storing format)\n # else: if followed by step\n\n wrong_unsiones = len(filter(lambda x: x.vi == 0 and x.hi_next != 1, melody_info))\n vscores.append(float(wrong_unsiones) / len(cp))\n\n if debug:\n print \"VSCORE 5: WRONG UNISONES - \", wrong_unsiones\n\n # vscore 6: max allowed interval between voices is 10th, except for climax\n big_vert_intervals = len(filter(lambda x: x.vi > 9 and x.pitch != highest_note, melody_info))\n vscores.append(float(big_vert_intervals) / len(melody_info))\n\n if debug:\n print \"VSCORE 6: VERT INTERVALS > 10TH - \", big_vert_intervals\n\n\n # vscore 7: There should be no crossing (ensured by generation)\n vscores.append(0.0)\n\n # vscore 8: avoid the overlapping of parts (ensured by generation)\n vscores.append(0.0)\n\n # vscore 9: no leaps from unison to octave and vice versa\n uni_to_oct_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.vi == 7 and melody_info[i + 1].vi == 0:\n uni_to_oct_count += 1\n if note.vi == 0 and melody_info[i + 1].vi 
== 7:\n uni_to_oct_count += 1\n\n vscores.append(float(uni_to_oct_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 9: UNISON-OCTAVE LEAPS - \", uni_to_oct_count\n\n # vscore 10: The ending is unison or octave (ensured by generation)\n vscores.append(0.0)\n\n # vscore 11: all perfect intervals (also perfect fourth) should be approached by contrary or oblique motion\n bad_perfect_intervals_count = 0\n battuda = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0 and melody_info[i].vi in PERFECT_INTERVALS:\n bad_perfect_intervals_count += 1\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp and prev_cf != cur_cf: # oblique\n bad_perfect_intervals_count -= 1\n if prev_cp > cur_cp and prev_cf <= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n if prev_cp < cur_cp and prev_cf >= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n\n vscores.append(float(bad_perfect_intervals_count) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 11: PERF INTERVALS APPROACHED BADLY - \", bad_perfect_intervals_count\n\n # vscore 12: avoid simultaneous leaps in cf and cp, especially large leaps in same direction\n leaps_count = 0\n large_leaps_count = 0\n for i in range(len(melody_info) - 1):\n if melody_info[i + 1].beat == 0:\n leap_cp = melody_info[i + 1].pitch - melody_info[i].pitch\n leap_cf = cantus_firmus[melody_info[i + 1].measure] - cantus_firmus[melody_info[i].measure]\n if abs(leap_cf) > 1 and abs(leap_cp) > 1:\n leaps_count += 1\n if leap_cf > 6 and leap_cp > 6:\n large_leaps_count += 1\n if leap_cf < 6 and leap_cp < 6:\n large_leaps_count += 1\n vscores.append(float(leaps_count + large_leaps_count) / (len(cantus_firmus) * 2))\n\n if debug:\n print \"VSCORE 12: SIM LEAPS - \", leaps_count, \", LARGE SIM LEAPS - \", large_leaps_count\n\n # vscore 13: use all types of motion\n similar = 0\n contrary = 0\n oblique = 0\n parallel = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0:\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp:\n if prev_cf != cur_cf:\n oblique = 1\n else:\n similar = 1\n if prev_cp > cur_cp:\n if prev_cf <= cur_cf:\n contrary = 1\n else:\n parallel = 1\n if prev_cp < cur_cp:\n if prev_cf >= cur_cf:\n contrary = 1\n else:\n parallel = 1\n types_of_motion = similar + oblique + contrary + parallel\n vscores.append(1 - float(types_of_motion) / 4)\n if debug:\n print \"VSCORE 13: MOTION TYPES (SOCP) - \", similar, oblique, contrary, parallel\n\n # vscore 14: climax of the CF and CP should not coincide\n cf_highest_note = max(cantus_firmus)\n coincide = 0\n for note in melody_info:\n if note.pitch == highest_note and cantus_firmus[note.measure] == cf_highest_note:\n coincide = 1\n\n vscores.append(coincide)\n if debug:\n print \"VSCORE 14: COINCIDE - \", coincide\n\n # vscore 15: Successive unisons, octaves and fifths on first beats are only valid\n # when separated by three quarter notes.\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 1):\n if melody_info[i].beat == 0 and melody_info[i].measure != len(cantus_firmus) - 1:\n if melody_info[i].vi in [0, 4, 7]:\n separated = True\n j = 1\n while melody_info[i + j].measure == 
melody_info[i].measure:\n if melody_info[i + j].duration > 2:\n separated = False\n j += 1\n if melody_info[i + j].vi in [0, 4, 7] and not separated:\n bad_intervals_count += 1\n vscores.append(float(bad_intervals_count) / (len(cantus_firmus) - 1))\n if debug:\n print \"VSCORE 15: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 16: successive unisons, octaves and fifths not on first beats:\n # valid when separated by at least 2 notes, otherwise not.\n # Unless it is a consonant suspension of quarter note: ok for afterbeat fifths and octaves\n # separated only by a single quearter.\n # ***what a complex rule, we don't care about consonant suspension due to storing format >_<***\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 2):\n separated = True\n if melody_info[i].beat != 0:\n if melody_info[i].vi in [0, 4, 7]:\n if melody_info[i + 1].vi in [0, 4, 7]:\n separated = False\n if separated:\n if melody_info[i + 2].vi in [0, 4, 7]:\n separated = False\n if not separated:\n bad_intervals_count += 1\n\n vscores.append(float(bad_intervals_count) / (len(melody_info) - len(cantus_firmus)))\n\n if debug:\n print \"VSCORE 16: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 17: no ottava or quinta battuda, whatever it means\n vscores.append(float(battuda) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 17: BATTUDAS - \", battuda\n\n # vscore 18: best ending: dissonant suspension into the leading tone (ensured by generation)\n vscores.append(0)\n\n # vscore 19: Thirds, sixths and tenths should predominate.\n good_interval_count = len(filter(lambda x: x.vi in [2, 5, 9], melody_info))\n if good_interval_count * 2 > len(melody_info):\n vscores.append(0.0)\n else:\n vscores.append(1.0 - float(2 * good_interval_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 19: 3RDS 6THS 10THS - \", good_interval_count\n\n genome.fitness = sum([x * y for x, y in zip(hscores, HSCORE_WEIGHTS)]) + \\\n sum([x * y for x, y in zip(vscores, VSCORE_WEIGHTS)])\n if debug:\n print \"HSCORES: \", hscores\n print \"VSCORES: \", vscores\n print \"FINAL SCORE: \", genome.fitness\n print \"FINAL SCORE UNSCALED: \", sum(hscores) + sum(vscores)\n return genome.fitness\n\n return fitness_function", "def get_likelihood(\n self,\n qb,\n inv_fish,\n map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = 
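The scoring loop above ends by combining the per-rule penalties into one fitness value. A minimal sketch of that weighted-sum step; HSCORE_WEIGHTS and VSCORE_WEIGHTS are module-level constants in the original, and the numbers here are placeholders:

hscores = [0.0, 0.1, 0.0, 0.25]          # per-rule horizontal penalties
vscores = [0.0, 0.5]                     # per-rule vertical penalties
HSCORE_WEIGHTS = [1.0, 2.0, 1.0, 1.0]    # placeholder weights
VSCORE_WEIGHTS = [1.0, 3.0]
fitness = sum(x * y for x, y in zip(hscores, HSCORE_WEIGHTS)) + \
          sum(x * y for x, y in zip(vscores, VSCORE_WEIGHTS))
print(fitness)   # 0.45 + 1.5 = 1.95; lower is better, 0.0 is a rule-perfect counterpoint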
None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n self.clear_precalc()\n use_precalc = all([x is None for x in obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n 
qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / (prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from 
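parse_params above consumes a flat MCMC parameter vector slice by slice in a fixed order. The same pattern in isolation, as a sketch with a hypothetical layout argument:

import numpy as np

def unpack(theta, layout):
    # layout: list of (name, length) pairs; returns dict of name -> scalar or array.
    params, i = {}, 0
    for name, n in layout:
        chunk = np.asarray(theta[i:i + n])
        params[name] = chunk[0] if n == 1 else chunk
        i += n
    if i != len(theta):
        raise ValueError("Too many parameters to parse")
    return params

# unpack([0.01, 0.3, 0.5], [("r", 1), ("alpha", 2)])
# -> {"r": 0.01, "alpha": array([0.3, 0.5])}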
input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd 
in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. 
Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))", "def generate_log(nexp, clpeaks, timepeaks, remap, gap=0):\n # Select the index of the experiment\n peakini = 0\n i = 0\n while i < nexp:\n exp = timepeaks[i]\n peakini += exp.shape[0]\n i += 1\n\n exp = timepeaks[nexp]\n peakend = peakini + exp.shape[0]\n\n # Build the sequence string\n peakstr = []\n peakset = []\n\n for i in range(peakini, peakend):\n peakset.append(voc[remap[clpeaks[i][0] - 1]])\n if i < peakend - 1 and gap != 0:\n if (timepeaks[nexp][i - peakini + 1] - timepeaks[nexp][i - peakini]) > gap:\n peakstr.append(peakset)\n peakset = []\n\n return peakstr", "def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8", "def fitness_function(genome, debug=False):\n cp = genome.chromosome\n\n # calculate some information for easier scoring\n ScoringInfo = namedtuple(\"ScoringInfo\", \"pitch duration measure beat vi hi_next hi_prev voice_dir\")\n melody_info = list()\n beat = 4\n measure = 0\n for i in range(1, len(cp)):\n hi_next = abs(cp[i][0] - cp[i + 1][0]) if i != len(cp) - 1 else -1 # next horizontal interval\n hi_prev = abs(cp[i][0] - cp[i - 1][0]) if i != 1 else -1 # previous horizontal interval\n vi = abs(cp[i][0] - cantus_firmus[measure]) # vertical interval\n # voice 
movement direction\n voice_dir = 0 if i == len(cp) - 1 or cp[i + 1][0] == cp[i][0] else copysign(1, cp[i + 1][0] - cp[i][0])\n melody_info.append(ScoringInfo(cp[i][0], cp[i][1], measure, beat, vi, hi_next, hi_prev, voice_dir))\n beat += cp[i][1]\n measure += beat / 8\n beat %= 8\n\n if debug:\n print \"MELODY INFO: \", melody_info\n\n hscores = list()\n vscores = list()\n # hscore 1: 8th notes must move in step\n amount_of_8th = 0\n amount_of_missteps = 0\n for note in melody_info:\n if note.duration == 1:\n amount_of_8th += 1\n if note.hi_next > 1:\n amount_of_missteps += 1\n if note.hi_prev > 1:\n amount_of_missteps += 1\n hscores.append(float(amount_of_missteps) / (amount_of_8th * 2))\n if debug:\n print \"HSCORE 1: 8TH - \", amount_of_8th, \", MISSTEPS - \", amount_of_missteps\n\n # hscore 2: one climax, that can be repeated only after neighboring tone\n # hscore 3: Climax should be on the strong beat\n highest_note = max([note.pitch for note in melody_info])\n climax_count = 0\n climax_on_weak_beat_count = 0\n for i, note in enumerate(melody_info):\n if note.pitch == highest_note:\n climax_count += 1\n if note.beat not in [0, 4]:\n climax_on_weak_beat_count += 1\n if i < len(melody_info) - 2 and note.pitch == melody_info[i + 2].pitch: # If next note is\n if note.hi_next == 1 and melody_info[i + 2].hi_prev == 1: # neighboring tone\n if note.vi in CONSONANCES and melody_info[i + 2].vi in CONSONANCES: # And surrounding notes are consonant\n climax_count -= 1 # we can allow 2nd climax\n if melody_info[i + 2].beat not in [0, 4]:\n climax_on_weak_beat_count -= 1 # And 2nd climax may be on weak beat\n\n hscores.append(float(climax_count - 1) / len(melody_info))\n hscores.append(float(climax_on_weak_beat_count) / climax_count)\n\n if debug:\n print \"HSCORE 2+3: CLIMAX CNT - \", climax_count, \", WEAK CLIMAX CNT - \", climax_on_weak_beat_count\n\n # hscore 4: Horizontal intervals are consonant\n unconsonant_amount = len(filter(lambda x: x.hi_next not in CONSONANCES + [1], melody_info[:-1]))\n hscores.append(float(unconsonant_amount) / (len(melody_info) - 1))\n\n if debug:\n print \"HSCORE 4: UNCONSANANT AMOUNT - \", unconsonant_amount\n\n # hscore 5: Stepwise movement should predominate\n leaps_count = len(filter(lambda x: x.hi_next != 1, melody_info[:-1]))\n sections = round(float(len(cantus_firmus)) / 16)\n if leaps_count < (2 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n elif leaps_count > (4 * sections):\n hscores.append(float(leaps_count) / (len(melody_info) - 1 - 4 * sections))\n else:\n hscores.append(0.0)\n\n if debug:\n print \"HSCORE 5: LEAPS - \", leaps_count, \"SECTIONS - \", sections\n\n # hscore 6: After large leap - stepwise motion\n large_leaps_count = 0\n large_leaps_not_followed_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3:\n large_leaps_count += 1\n if melody_info[i + 1].hi_next != 1:\n large_leaps_not_followed_count += 1\n hscores.append(float(large_leaps_not_followed_count) / large_leaps_count if large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 6: LL CNT - \", large_leaps_count, \"LL NOT FOLLOWED CNT - \", large_leaps_not_followed_count\n\n # hscore 7: change direction after each large leap\n large_leaps_not_changedir_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.hi_next >= 3 and note.voice_dir != -melody_info[i + 1].voice_dir:\n large_leaps_not_changedir_count += 1\n hscores.append(float(large_leaps_not_changedir_count) / large_leaps_count if 
large_leaps_count != 0 else 0.0)\n\n if debug:\n print \"HSCORE 7: LL NOT CHNGDIR CNT - \", large_leaps_not_changedir_count\n\n # hscore 8: climax should be melodically consonant with tonic\n hscores.append(1.0 if highest_note - 4 in CONSONANCES else 0.0)\n\n # hscore 9: no more than 2 consecutive leaps\n conseq_leaps = 0\n punish_score = 0\n for note in melody_info:\n conseq_leaps += 1\n if note.hi_next in [0, 1]:\n conseq_leaps = 0\n if conseq_leaps > 3:\n punish_score += 1\n hscores.append(float(punish_score) / (len(melody_info) - 3))\n\n if debug:\n print \"HSCORE 9: CONSEQ LEAPS PUNISH SCORE - \", punish_score\n\n # hscore 10: no more than 2 large leaps per section\n if large_leaps_count > 2 * sections:\n hscores.append(float(large_leaps_count - 2 * sections) / (len(melody_info) - 1 - 2 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 11: not too long stepwise in same direction\n longest_stepwise_seq = 0\n current_stepwise_seq = 0\n prev_dir = 0\n num_changes = 0\n motion_vector = list()\n for note in melody_info:\n if note.hi_next <= 1:\n if note.voice_dir in [prev_dir, 0]:\n current_stepwise_seq += 1\n longest_stepwise_seq = max(longest_stepwise_seq, current_stepwise_seq)\n else:\n prev_dir = note.voice_dir\n current_stepwise_seq = 0\n num_changes += 1\n motion_vector.append(note.pitch)\n else:\n if note.voice_dir != prev_dir and note.voice_dir != 0:\n prev_dir = note.voice_dir\n num_changes += 1\n motion_vector.append(note.pitch)\n current_stepwise_seq = 0\n motion_vector.append(cp[-1][0])\n if longest_stepwise_seq < 5:\n longest_stepwise_seq = 0\n hscores.append(float(longest_stepwise_seq) / len(cp))\n\n if debug:\n print \"HSCORE 11: LONGEST STEPWISE SEQUENCE - \", longest_stepwise_seq\n\n # hscore 12: direction needs to change several times\n if num_changes < 3 * sections:\n hscores.append(1 - float(num_changes) / (3 * sections))\n else:\n hscores.append(0.0)\n\n # hscore 13: ending note is tonic\n hscores.append(0)\n\n # hscore 14: penultimate note is leading tone\n hscores.append(0)\n\n # hscore 15: the start of a motion is consonant with the end of a motion\n unconsotant_count = 0\n big_leaps_count = 0\n for i in range(1, len(motion_vector) - 1):\n if abs(motion_vector[i] - motion_vector[i + 1]) not in CONSONANCES:\n unconsotant_count += 1\n if abs(motion_vector[i] - motion_vector[i + 1]) > 6:\n big_leaps_count += 1\n hscores.append(float(unconsotant_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 15: UNCONSONANT MOTIONS - \", unconsotant_count\n\n # hscore 16: Large motion intervals (>6 tones) should be avoided\n hscores.append(float(big_leaps_count) / len(motion_vector))\n\n if debug:\n print \"HSCORE 16: LARGE MOTIONS - \", big_leaps_count\n\n # hscore 17: No frequent repetition of the same note\n rep_count = 0\n for note in melody_info:\n if note.hi_next == 0:\n rep_count += 1\n if rep_count > 2 * sections:\n rep_count -= 2 * sections\n else:\n rep_count = 0\n hscores.append(float(rep_count) / (len(cp) - 2 * sections))\n\n if debug:\n print \"HSCORE 17: REPETITIONS COUNT - \", rep_count\n\n # hscore 18: no repetition of sequence within a 4 measure interval\n repeated = set()\n for i in range(len(melody_info) - 2):\n j = i + 1\n while melody_info[j].measure < melody_info[i].measure + 4 and j < len(melody_info) - 1:\n if melody_info[i].pitch == melody_info[j].pitch:\n k = 1\n while j + k < len(melody_info) and melody_info[j + k].pitch == melody_info[i + k].pitch:\n if k == 1:\n repeated.add(j)\n repeated.add(j + k)\n k += 1\n j += 1\n\n 
hscores.append(float(len(repeated)) / len(cp))\n\n if debug:\n print \"HSCORE 18: REPEATED POSITIONS - \", repeated\n\n # hscore 19: largest allowed interval is octave\n more_than_ocatave_amount = len(filter(lambda x: x.hi_next > 7, melody_info[:-1]))\n hscores.append(float(more_than_ocatave_amount) / len(cp))\n\n if debug:\n print \"HSCORE 19: MORE THAN OCTAVES - \", more_than_ocatave_amount\n\n # vscore 1: whole notes should be consonant (ensured by generation and hscore 13)\n vscores.append(0.0)\n\n # vscores 2 and 3: halves and quarters should be consonant on first beat.\n # or can be dissonant on other beats beat, if passing tone\n amount_of_notes = 0\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info):\n if note.duration >= 2:\n amount_of_notes += 1\n if note.beat == 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.beat != 0 and note.vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n if note.hi_prev == 1 and note.hi_next == 1 and note.voice_dir == melody_info[i - 1].voice_dir:\n if melody_info[i - 1].vi in CONSONANCES and melody_info[i + 1].vi in CONSONANCES:\n amount_of_wrong_notes -= 1\n\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n vscores.append(float(amount_of_wrong_notes) / amount_of_notes)\n\n if debug:\n print \"VSCORE 2+3: NOTES > THAN 8TH - \", amount_of_notes, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 4: one of eight notes from pair should be consonant\n amount_of_wrong_notes = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.duration == 1 and melody_info[i + 1].duration == 1:\n if note.vi not in CONSONANCES and melody_info[i + 1].vi not in CONSONANCES:\n amount_of_wrong_notes += 1\n beat += cp[i][1]\n vscores.append(float(amount_of_wrong_notes) / amount_of_8th)\n\n if debug:\n print \"VSCORE 4: 8TH NOTES - \", amount_of_8th, \",DISSONANT ONES - \", amount_of_wrong_notes\n\n # vscore 5: unisons ok if on 1st beat through suspension or if tied over (ensured by storing format)\n # else: if followed by step\n\n wrong_unsiones = len(filter(lambda x: x.vi == 0 and x.hi_next != 1, melody_info))\n vscores.append(float(wrong_unsiones) / len(cp))\n\n if debug:\n print \"VSCORE 5: WRONG UNISONES - \", wrong_unsiones\n\n # vscore 6: max allowed interval between voices is 10th, except for climax\n big_vert_intervals = len(filter(lambda x: x.vi > 9 and x.pitch != highest_note, melody_info))\n vscores.append(float(big_vert_intervals) / len(melody_info))\n\n if debug:\n print \"VSCORE 6: VERT INTERVALS > 10TH - \", big_vert_intervals\n\n\n # vscore 7: There should be no crossing (ensured by generation)\n vscores.append(0.0)\n\n # vscore 8: avoid the overlapping of parts (ensured by generation)\n vscores.append(0.0)\n\n # vscore 9: no leaps from unison to octave and vice versa\n uni_to_oct_count = 0\n for i, note in enumerate(melody_info[:-1]):\n if note.vi == 7 and melody_info[i + 1].vi == 0:\n uni_to_oct_count += 1\n if note.vi == 0 and melody_info[i + 1].vi == 7:\n uni_to_oct_count += 1\n\n vscores.append(float(uni_to_oct_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 9: UNISON-OCTAVE LEAPS - \", uni_to_oct_count\n\n # vscore 10: The ending is unison or octave (ensured by generation)\n vscores.append(0.0)\n\n # vscore 11: all perfect intervals (also perfect fourth) should be approached by contrary or oblique motion\n bad_perfect_intervals_count = 0\n battuda = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0 and melody_info[i].vi in PERFECT_INTERVALS:\n 
bad_perfect_intervals_count += 1\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp and prev_cf != cur_cf: # oblique\n bad_perfect_intervals_count -= 1\n if prev_cp > cur_cp and prev_cf <= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n if prev_cp < cur_cp and prev_cf >= cur_cf: # contrary\n bad_perfect_intervals_count -= 1\n if melody_info[i].vi in [4, 7]:\n battuda += 1\n\n vscores.append(float(bad_perfect_intervals_count) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 11: PERF INTERVALS APPROACHED BADLY - \", bad_perfect_intervals_count\n\n # vscore 12: avoid simultaneous leaps in cf and cp, especially large leaps in same direction\n leaps_count = 0\n large_leaps_count = 0\n for i in range(len(melody_info) - 1):\n if melody_info[i + 1].beat == 0:\n leap_cp = melody_info[i + 1].pitch - melody_info[i].pitch\n leap_cf = cantus_firmus[melody_info[i + 1].measure] - cantus_firmus[melody_info[i].measure]\n if abs(leap_cf) > 1 and abs(leap_cp) > 1:\n leaps_count += 1\n if leap_cf > 6 and leap_cp > 6:\n large_leaps_count += 1\n if leap_cf < 6 and leap_cp < 6:\n large_leaps_count += 1\n vscores.append(float(leaps_count + large_leaps_count) / (len(cantus_firmus) * 2))\n\n if debug:\n print \"VSCORE 12: SIM LEAPS - \", leaps_count, \", LARGE SIM LEAPS - \", large_leaps_count\n\n # vscore 13: use all types of motion\n similar = 0\n contrary = 0\n oblique = 0\n parallel = 0\n for i in range(1, len(melody_info)):\n if melody_info[i].beat == 0:\n prev_cf = cantus_firmus[melody_info[i - 1].measure]\n prev_cp = melody_info[i - 1].pitch\n cur_cf = cantus_firmus[melody_info[i].measure]\n cur_cp = melody_info[i].pitch\n if prev_cp == cur_cp:\n if prev_cf != cur_cf:\n oblique = 1\n else:\n similar = 1\n if prev_cp > cur_cp:\n if prev_cf <= cur_cf:\n contrary = 1\n else:\n parallel = 1\n if prev_cp < cur_cp:\n if prev_cf >= cur_cf:\n contrary = 1\n else:\n parallel = 1\n types_of_motion = similar + oblique + contrary + parallel\n vscores.append(1 - float(types_of_motion) / 4)\n if debug:\n print \"VSCORE 13: MOTION TYPES (SOCP) - \", similar, oblique, contrary, parallel\n\n # vscore 14: climax of the CF and CP should not coincide\n cf_highest_note = max(cantus_firmus)\n coincide = 0\n for note in melody_info:\n if note.pitch == highest_note and cantus_firmus[note.measure] == cf_highest_note:\n coincide = 1\n\n vscores.append(coincide)\n if debug:\n print \"VSCORE 14: COINCIDE - \", coincide\n\n # vscore 15: Successive unisons, octaves and fifths on first beats are only valid\n # when separated by three quarter notes.\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 1):\n if melody_info[i].beat == 0 and melody_info[i].measure != len(cantus_firmus) - 1:\n if melody_info[i].vi in [0, 4, 7]:\n separated = True\n j = 1\n while melody_info[i + j].measure == melody_info[i].measure:\n if melody_info[i + j].duration > 2:\n separated = False\n j += 1\n if melody_info[i + j].vi in [0, 4, 7] and not separated:\n bad_intervals_count += 1\n vscores.append(float(bad_intervals_count) / (len(cantus_firmus) - 1))\n if debug:\n print \"VSCORE 15: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 16: successive unisons, octaves and fifths not on first beats:\n # valid when separated by at least 2 notes, otherwise not.\n # Unless it is a consonant suspension of quarter note: ok for afterbeat fifths and 
octaves\n # separated only by a single quearter.\n # ***what a complex rule, we don't care about consonant suspension due to storing format >_<***\n bad_intervals_count = 0\n\n for i in range(len(melody_info) - 2):\n separated = True\n if melody_info[i].beat != 0:\n if melody_info[i].vi in [0, 4, 7]:\n if melody_info[i + 1].vi in [0, 4, 7]:\n separated = False\n if separated:\n if melody_info[i + 2].vi in [0, 4, 7]:\n separated = False\n if not separated:\n bad_intervals_count += 1\n\n vscores.append(float(bad_intervals_count) / (len(melody_info) - len(cantus_firmus)))\n\n if debug:\n print \"VSCORE 16: BAD INTERVALS - \", bad_intervals_count\n\n # vscore 17: no ottava or quinta battuda, whatever it means\n vscores.append(float(battuda) / len(cantus_firmus))\n\n if debug:\n print \"VSCORE 17: BATTUDAS - \", battuda\n\n # vscore 18: best ending: dissonant suspension into the leading tone (ensured by generation)\n vscores.append(0)\n\n # vscore 19: Thirds, sixths and tenths should predominate.\n good_interval_count = len(filter(lambda x: x.vi in [2, 5, 9], melody_info))\n if good_interval_count * 2 > len(melody_info):\n vscores.append(0.0)\n else:\n vscores.append(1.0 - float(2 * good_interval_count) / len(melody_info))\n\n if debug:\n print \"VSCORE 19: 3RDS 6THS 10THS - \", good_interval_count\n\n genome.fitness = sum([x * y for x, y in zip(hscores, HSCORE_WEIGHTS)]) + \\\n sum([x * y for x, y in zip(vscores, VSCORE_WEIGHTS)])\n if debug:\n print \"HSCORES: \", hscores\n print \"VSCORES: \", vscores\n print \"FINAL SCORE: \", genome.fitness\n print \"FINAL SCORE UNSCALED: \", sum(hscores) + sum(vscores)\n return genome.fitness", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)", "def compute_movie_rating_likelihood(M):\n\n # define the size to begin with\n likelihood = np.zeros((M, M))\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (c)\n #\n # Remember to normalize the likelihood, so that each column is a\n # probability distribution.\n \n for i in range(M):\n for j in range(M):\n if i == j:\n likelihood[i][j] = 2\n else:\n likelihood[i][j] = 1/abs(j-i)\n \n likelihood = likelihood / likelihood.sum(axis = 1)\n \n #\n # END OF YOUR CODE FOR PART (c)\n # -------------------------------------------------------------------------\n\n return likelihood", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, 
start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def outputGenerationProbability(self):\n self.b = zeros((self.noOfEmmittingStates, self.T))\n for row in range(self.noOfEmmittingStates):\n for col in range(self.T):\n self.b[row, col] = self.gaussianDist(self.observationSequence[0, col],\n self.outputProbabilities[row, 0],\n self.outputProbabilities[row, 1])", "def boost_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n Lib_c.set_target(possible_target_location)\n probabilities[possible_target_location] = integrate.quad(\n Lib_c.function,\n -np.inf, np.inf,\n epsabs=0,\n limit=50,\n full_output=1\n )[0]\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def get_ngramlogprobs_fromcorpus(tokenizedseqs, n):\n return", "def design_guide_and_donor_for_genes(input_design_table_filename, input_annot_gff_filename, input_genome_fasta_filename,\n PAM_seq = 'GA', excluded_seqs = ['TTTT'], BOWTIE_exe = '',\n off_targets_min_mismatch = 10,\n min_azimuth_score = 0.5,\n sort_by = '5to3', \n min_dist_cut_to_donor_edge = 30,\n filter_out_donors_containing_excluded_seqs = False,\n max_guide_pos_frac_in_gene = 1.1,\n output_guides_df_filename = '', output_donors_df_filename = ''):\n \n ##################\n # processing input\n ##################\n\n # fixing the mapping command to be the default bowtie\n if not BOWTIE_exe or len(BOWTIE_exe) < 3:\n BOWTIE_exe = '/Users/eilon/software/bowtie2-2.2.8/bowtie2'\n \n # defining the bowtie mapping cmd\n bowtie_cmd = shlex.split(BOWTIE_exe + ' -x ' + os.path.splitext(input_genome_fasta_filename)[0] + ' -U - -f -D 20 -R 3 -N 1 -L 10 -i S,1,0.50 --gbar 3 --end-to-end -k 30 --no-head -t --rdg 10,6 --rfg 10,6')\n mapping_cmd = bowtie_cmd\n\n \n # loading design matrix \n # fields: gene_id\tguide_num\tdonor_mut_type\tnum_donor_variants\tmut_pos_in_guide\tdonor_length\tdonor_seq_offsets\n\n design_df = pd.read_table(input_design_table_filename, sep='\\t', na_values = \"\")\n design_df['donor_seq_offsets'] = design_df['donor_seq_offsets'].apply(ast.literal_eval)\n design_df['donor_seq_offsets'] = design_df['donor_seq_offsets'].apply(np.array,dtype=np.float)\n \n design_df['gene_id'] = design_df['gene_id'].str.strip()\n design_df['donor_mut_type'] = design_df['donor_mut_type'].str.strip()\n \n \n # loading gene gff matrix\n print \"loading genes gff file: \" + input_annot_gff_filename\n genes_gff_df = sgd_gff2dataframe(input_annot_gff_filename, ['CDS'])\n \n # loading genome fasta file\n print \"loading genome fasta file: \" + input_genome_fasta_filename\n genome_seq = SeqIO.to_dict(SeqIO.parse(open(input_genome_fasta_filename),'fasta', alphabet=generic_dna))\n \n\n \n # init output dataframes\n out_guide_df = pd.DataFrame(data=None)\n out_guide_donor_df = pd.DataFrame(data=None)\n \n ####################\n # running gene by gene such that the donor sequences will use the same guides\n # if differnet number of guides are specified for each donor desing the guides will be selected from best to worst\n ####################\n \n # grouping by 
gene\n design_grouped = design_df.groupby('gene_id')\n \n\n # iterating over the genes\n for cur_gene_id, cur_gene_df in design_grouped:\n\n print \"Designing guides for gene:\" + cur_gene_id\n \n # the current gene object\n cur_gene = CRISPR_Gene(cur_gene_id, genes_gff_df, genome_seq)\n \n # if differnet number of guides are specified for each donor desing the guides will be selected from best to worst\n cur_gene_max_guide_num = max(cur_gene_df['guide_num'])\n \n # if even one row requires the guide to be entirely in the CDS - find only guides that are entirely in the CDS\n #cur_require_entire_guide_to_be_in_CDS = np.any(~ np.isnan(cur_gene_df['mut_pos_in_guide'].values))\n # np.any(cur_gene_df['require_entire_guide_in_CDS'])\n #\n cur_min_mut_pos_in_guide = min(int(-cur_gene.CRISPR_CUT_INDEX - 1*(cur_gene.is_neg_strand())),np.nanmin(cur_gene_df['mut_pos_in_guide'].values))\n cur_max_mut_pos_in_guide = max(2,np.nanmax(cur_gene_df['mut_pos_in_guide'].values))\n \n \n # max half region length to look for excluded sequences (should be at least ~20 to test the guide)\n \n\n cur_max_donor_seq_offset = np.nanmax(np.abs(cur_gene_df['donor_seq_offsets'].apply(np.nanmax).values))\n if np.isnan(cur_max_donor_seq_offset):\n cur_max_donor_seq_offset = 0\n \n \n \n cur_max_seq_len_around_cut = int( np.ceil(max(cur_gene_df['donor_length']) / 2) + \\\n cur_max_donor_seq_offset)\n \n #print \"Extracting all guides\"\n \n # get all guides (computing filter guides that contain excluded sequences in the sequences around them)\n cur_all_gene_guides_df = cur_gene.get_all_guides_that_cut_in_cds(pam = PAM_seq, seq_len_around_cut = cur_max_seq_len_around_cut, \n min_mut_pos_in_guide = cur_min_mut_pos_in_guide,\n max_mut_pos_in_guide = cur_max_mut_pos_in_guide,\n excluded_seqs = excluded_seqs, mapping_cmd = mapping_cmd,\n sort_by = sort_by)\n \n #DEBUG\n #print \"before get_K_best_guide_ids\" #DEBUG\n #print sum(cur_all_gene_guides_df['guide_id']== 'YAL001C_pS_33') #DEBUG\n \n #print cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id']== 'YAL001C_pS_33' ]\n #print cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id']== 'YAL001C_pS_20' ]\n #print cur_gene_max_guide_num\n #print off_targets_min_mismatch\n #print min_azimuth_score\n #print max_guide_pos_frac_in_gene\n\n # select top 'best' guides\n selected_guide_ids = cur_gene.get_K_best_guide_ids(guides_df = cur_all_gene_guides_df, K = cur_gene_max_guide_num, \n off_targets_min_mismatch = off_targets_min_mismatch,\n min_azimuth_score = min_azimuth_score,\n max_guide_pos_frac_in_gene = max_guide_pos_frac_in_gene,\n sort_by = sort_by)\n #print \"after get_K_best_guide_ids\" #DEBUG\n #for gi in selected_guide_ids:\n # print gi\n #print ('YAL001C_pS_33' in list(selected_guide_ids) ) \n #print 'XXX-------------------' #DEBUG\n \n # selected guides (each guide should have at least one donor designed)\n cur_selected_guide_df = cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id'].isin( list(selected_guide_ids) ) ]\n \n print \"--- Designing donor sequences for gene:\" + cur_gene_id + \", for \" + str(len(selected_guide_ids)) + \" guides (out of \" + str(cur_gene_max_guide_num) + \" requested)\"\n #print cur_selected_guide_df[ cur_selected_guide_df['guide_id'] == 'YAL001C_pS_33' ]\n \n # concating with the output dataframe\n out_guide_df = pd.concat([out_guide_df,cur_selected_guide_df],ignore_index=True)\n\n \n # desinging the donor sequences\n for idx,row in cur_gene_df.iterrows():\n \n if len(selected_guide_ids) < int(row['guide_num']):\n 
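# fewer candidate guides passed filtering than this design row requested; warn and design with the available ones\n                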
warnings.warn(\"There are NOT enough guides in --- %s --- for the design. There are %d guides and the design if for %d\" % (str(row[\"gene_id\"]),len(selected_guide_ids), row['guide_num'] ))\n \n if len(selected_guide_ids) <= 0:\n continue\n \n cur_selected_guide_ids = selected_guide_ids.iloc[range( min(int(row['guide_num']),len(selected_guide_ids)) )]\n \n \n # do reverse complement for donor sequences\n if 'do_revcomp_donor' in cur_gene_df.columns:\n do_revcomp_donor = bool(row['do_revcomp_donor']==True)\n else:\n do_revcomp_donor = False\n \n \n \n \n # do reverse complement for donor sequences\n if 'do_scramble_guide_and_donor' in cur_gene_df.columns:\n scramble_guide_and_donor = bool(row['do_scramble_guide_and_donor']==True)\n else:\n scramble_guide_and_donor = False\n \n #print \"do_revcomp_donor %d\" % (do_revcomp_donor)\n #print \"scramble_guide_and_donor %d\" % (scramble_guide_and_donor)\n \n cur_all_gene_guides_df_fordonors = cur_all_gene_guides_df\n \n # permuting the guides and adding it to the guide df\n if scramble_guide_and_donor:\n print 'Scramble donor guides...'\n cur_all_gene_guides_df_scramble = cur_all_gene_guides_df[ cur_all_gene_guides_df['guide_id'].isin(cur_selected_guide_ids) ].copy()\n # scrambling the guides and updating their ids\n cur_all_gene_guides_df_scramble['guide_id'] = cur_all_gene_guides_df_scramble['guide_id'] + '_scramble'\n scramble_func = lambda x: ''.join(random.sample(str(x),len(str(x))))\n cur_all_gene_guides_df_scramble['guide'] = cur_all_gene_guides_df_scramble['guide'].apply(scramble_func)\n cur_all_gene_guides_df_scramble['guide_noPAM'] = cur_all_gene_guides_df_scramble['guide_noPAM'].apply(scramble_func)\n cur_all_gene_guides_df_scramble['guide_PAM_p7'] = cur_all_gene_guides_df_scramble['guide_PAM_p7'].apply(scramble_func)\n cur_all_gene_guides_df_scramble['guide_PAM_m4p3'] = cur_all_gene_guides_df_scramble['guide_PAM_m4p3'].apply(scramble_func)\n # adding the scrambeles guides to the guides table\n out_guide_df = pd.concat([out_guide_df,cur_all_gene_guides_df_scramble],ignore_index=True)\n\n \n # for donors design\n cur_all_gene_guides_df_fordonors = cur_all_gene_guides_df_scramble\n cur_selected_guide_ids = cur_selected_guide_ids + '_scramble'\n \n \n cur_gene_donor_df = cur_gene.get_donor_mut_for_guides(cur_all_gene_guides_df_fordonors, cur_selected_guide_ids, \n donor_mut_type=str(row['donor_mut_type']), \n num_donor_variants = int(row['num_donor_variants']), \n mut_pos_in_guide = row['mut_pos_in_guide'], \n donor_length=int(row['donor_length']), \n donor_seq_offsets = row['donor_seq_offsets'],\n set_name = row['set_name'],\n min_dist_cut_to_donor_edge = min_dist_cut_to_donor_edge,\n excluded_seqs = excluded_seqs,\n do_revcomp_donor = do_revcomp_donor,\n scramble_guide_and_donor = scramble_guide_and_donor)\n \n out_guide_donor_df = pd.concat([out_guide_donor_df,cur_gene_donor_df],ignore_index=True)\n\n #print \"---------------------------- Finished designing guide for the above gene\"\n\n\n # adding for each guide tis location in the gene (now added in )\n #out_guide_df[\"guide_cut_gene_pos_frac\"] = out_guide_df[\"guide_cut_gene_nt_pos\"] / out_guide_df[\"CDS_len_nts\"]\n\n # filtering out donor sequences with excluded sequences\n if filter_out_donors_containing_excluded_seqs:\n if out_guide_donor_df.shape[1] > 1: # if not null df\n out_guide_donor_df = out_guide_donor_df[ (~ out_guide_donor_df[\"contain_excluded_sequences\"]).values ]\n \n \n if len(output_guides_df_filename) > 3:\n print \"saving guides to: \" + 
output_guides_df_filename\n out_guide_df.to_csv(output_guides_df_filename, sep='\\t', index = False)\n \n \n \n if len(output_donors_df_filename) > 3:\n print \"saving donor sequences to: \" + output_donors_df_filename\n out_guide_donor_df.to_csv(output_donors_df_filename, sep='\\t', index = False)\n \n \n return( (out_guide_df, out_guide_donor_df) )", "def generate_rbpdb_experimental_to_pwm(letter_strength, n_repeat_req):\n rbpdb_experiment_file_path = (\n \"./website/data/RBPDB_v1.3.1_experiments_human_2012-11-21.tdt\"\n )\n rbpdb_pfm_file_directory = \"./website/data/rbpdb-human-pfm-matrices/\"\n experimental_to_pwm_dict = {}\n with open(rbpdb_experiment_file_path) as handle:\n line = handle.readline()\n while line:\n columns = line.split(\"\\t\")\n # Here we expect the columns to be:\n # experimental_id, PUBMED_ID, exp_type, notes, seq_motif,\n # selex_file, aligned_selex_file,\n # aligned_motif_file, PWM_file, PFM_file, logo_file,\n # secondary_structure, in_vivo_notes, in_vivo_file, flag\n if columns[14] == \"1\":\n # The flag means this data is unreliable, according to the RBPDB\n # Readme files\n line = handle.readline()\n continue\n\n experimental_id = columns[0]\n\n assert len(experimental_id) > 0\n pfm_file = columns[9]\n seq_motifs = columns[4]\n pwms = []\n if pfm_file != \"\\\\N\":\n pfm_file_path = rbpdb_pfm_file_directory + pfm_file\n with open(pfm_file_path) as pfm_file_handle:\n raw_pwm_str = pfm_file_handle.read()\n pwm = str_to_pwm(raw_pwm_str, is_transpose=True)\n pwms += [pwm]\n elif seq_motifs not in (\"\\\\N\", \"\"):\n # This experiment still generated some useful data\n seq_motifs = seq_motifs.split(\";\")\n i = 0\n while i != len(seq_motifs):\n seq_motif = seq_motifs[i]\n while \")(\" in seq_motif:\n repeat_end = seq_motif.find(\")(\")\n assert seq_motif[repeat_end] == \")\"\n repeat_start = repeat_end\n while seq_motif[repeat_start] != \"(\":\n repeat_start -= 1\n\n number_start = repeat_end + 2\n assert (\n seq_motif[number_start].isdigit()\n or seq_motif[number_start] == \"n\"\n )\n number_end = number_start\n while seq_motif[number_end] != \")\":\n number_end += 1\n\n # deal with cases where the number of repeats is\n # \"15-30\". 
Take minimum to be conservative.\n # Note that most cases would be a single number like\n # \"15\".\n num_of_repeats = (\n min(\n [\n int(s) if s != \"n\" else\n math.ceil(\n n_repeat_req\n / (repeat_end - repeat_start - 1)\n )\n for s in\n seq_motif[number_start: number_end]\n .split(\"-\")\n ]\n )\n )\n\n seq_motif = (\n seq_motif.replace(\n seq_motif[repeat_start: number_end + 1],\n seq_motif[repeat_start + 1: repeat_end]\n * num_of_repeats\n )\n )\n\n\n maketrans = str.maketrans\n all_letters = 'wruysn'\n upper_map = maketrans(all_letters, all_letters.upper())\n seq_motif = seq_motif.translate(upper_map)\n if \"/\" in seq_motif:\n bracket_start = bracket_end = middle = (\n seq_motif.find(\"/\")\n )\n while seq_motif[bracket_start] != \"(\":\n bracket_start -= 1\n while seq_motif[bracket_end] != \")\":\n bracket_end += 1\n seq_motif_1 = (\n seq_motif.replace(\n seq_motif[bracket_start: bracket_end + 1],\n seq_motif[bracket_start + 1: middle]\n )\n )\n seq_motif_2 = (\n seq_motif.replace(\n seq_motif[bracket_start: bracket_end + 1],\n seq_motif[middle + 1: bracket_end]\n )\n )\n seq_motifs += [seq_motif_1, seq_motif_2]\n else:\n pwm = motif_to_pwm(\n seq_motif, letter_strength=letter_strength\n )\n pwms += [pwm]\n i += 1\n\n # Now we have the raw text, we convert it to pwm and add to\n # dictionary\n experimental_to_pwm_dict[experimental_id] = pwms\n line = handle.readline()\n\n return experimental_to_pwm_dict", "def get_ngramlogprobs(freqdict):\n return", "def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))", "def evaluate_ngrams(eval_dataset, trigram_counts, bigram_counts, unigram_counts, train_token_count, lambda1, lambda2):\n perplexity = 0\n\n ### YOUR CODE HERE\n def calc_prob(sentense, i, word, trigram_counts, bigram_counts, unigram_counts, train_token_count, model):\n prob = 0.0\n prev_word = sentense[i - 1]\n prev_to_prev_word = sentense[i - 2]\n\n if model == \"unigram\":\n if word in unigram_counts:\n prob = (unigram_counts[word] + 0.0) / train_token_count\n else:\n prob = (unigram_counts[word_to_num['UUUNKKK']] + 0.0) / \\\n train_token_count\n\n if model == \"bigram\":\n if (prev_word, word) in bigram_counts:\n prob = (bigram_counts[(prev_word, word)] + 0.0) / \\\n unigram_counts[prev_word]\n # print(num_to_word[prev_word] ,num_to_word[word])\n # print(bigram_counts[(prev_word, word)])\n # print(unigram_counts[prev_word])\n # print(\"---------------------------\")\n else:\n prob = 0.0\n\n if model == \"trigram\":\n if (prev_to_prev_word, prev_word, word) in trigram_counts:\n prob = (trigram_counts[(prev_to_prev_word, prev_word, word)] + 0.0) \\\n / bigram_counts[(prev_to_prev_word, prev_word)]\n # / bigram_counts[(prev_word, word)] #this according to lecture notes slide 27\n else:\n prob = 0.0\n\n return prob\n\n l = 0\n num_of_words = 0\n\n ##########3\n better_than_chance = 0\n ###########\n\n for 
sentense in eval_dataset:\n for i, word in enumerate(sentense[2:]):\n num_of_words += 1\n prob = lambda1 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"trigram\") + \\\n lambda2 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"bigram\") + \\\n (1 - lambda1 - lambda2) * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts,\n unigram_counts, train_token_count, \"unigram\")\n ######################################\n if prob > (1.0 / vocabsize):\n better_than_chance += 1\n #########################\n l += np.log2(prob)\n l /= num_of_words\n perplexity = 2 ** -l\n\n print(\"better_than_chance:\", (better_than_chance + 0.0) / num_of_words)\n\n ### END YOUR CODE\n return perplexity", "def compute_AttentionLRP(self, cell_state, relevance_a, output_states):\r\n\r\n\r\n\t\t#Reconstructing the concatenated encoder states\r\n\t\tmax_encoding_len = len(output_states)\r\n\t\tu = np.zeros(output_states[0].shape)\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tu += cell_state[\"alpha\"][i, 0] * output_states[i]\r\n\t\ta = np.matmul(self.attentonLayer, np.concatenate([cell_state[\"lstm\"]['last_h'], u]))\r\n\r\n\t\t# LRP for the attention layer\r\n\t\tinp_a_rel = layerLRP(np.concatenate([cell_state[\"lstm\"]['last_h'], u]), self.attentonLayer, np.zeros((self.attentionSize, 1)), a, relevance_a)\r\n\r\n\t\th_relevance= inp_a_rel[:self.cellSize]\r\n\t\tu_relevance = inp_a_rel[self.cellSize:]\r\n\r\n\t\tforward_encoder_relevance = []\r\n\t\tbackward_decoder_relevance = []\r\n\r\n\r\n\t\tinput_lrp_vector = np.concatenate([cell_state[\"alpha\"][i, 0] * output_states[i] for i in range(max_encoding_len)])\r\n\t\tinput_lrp_matrix = np.concatenate([np.identity(2*self.cellSize) for i in range(max_encoding_len)], 1)\r\n\t\t#for i in range(max_encoding_len):\r\n\t\t\t#inp_c_rel = layerLRP(cell_state[\"alpha\"][i, 0] * output_states[i], np.identity(2*self.cellSize), np.zeros((2*self.cellSize, 1)), u, u_relevance, epsilon = 0.001, delta=1.0)\r\n\t\t\t#forward_encoder_relevance.append(inp_c_rel[:self.cellSize])\r\n\t\t\t#backward_decoder_relevance.append(inp_c_rel[self.cellSize:])\r\n\t\tinp_c_rel = layerLRP(input_lrp_vector, input_lrp_matrix, np.zeros((2*self.cellSize, 1)), u, u_relevance)\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tforward_encoder_relevance.append(inp_c_rel[2*i*self.cellSize:(2*i+1)*self.cellSize])\r\n\t\t\tbackward_decoder_relevance.append(inp_c_rel[(2*i+1)*self.cellSize:(2*(i+1))*self.cellSize])\r\n\r\n\t\treturn h_relevance, forward_encoder_relevance, backward_decoder_relevance", "def detectionProb(efficiency, trigger=1):\n if trigger > 1.:\n raise ValueError('Trigger > 1 not implemented yet\\n')\n q = 1.0 - np.asarray(efficiency)\n\n # probability of 0 detections\n logq = np.log(q)\n logpiq = logq.sum()\n piq = np.exp(logpiq)\n\n return 1.0 - piq", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def updateResidueProbAnnotation(residueProb):\n\n for resonance in residueProb.resonanceGroup.resonances:\n updateResonanceAnnotation(resonance)", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def prob_to_llre(meth_prob):\n return math.log((meth_prob + EPSLONG) / (1 - meth_prob + 
EPSLONG))", "def compute_log_probability_of_text(text, char_to_ix, frequency_statistics, transition_matrix):\n t = text\n cix = char_to_ix\n fr = frequency_statistics\n tm = transition_matrix\n \n i0 = cix[t[0]]\n p = np.log(fr[i0])\n i = 0\n while i < len(t)-1:\n i1 = cix[t[i+1]]\n p += np.log(tm[i0, i1])\n i0 = i1\n i += 1\n \n return p", "def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans 
* y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def prob4():\n #get Omega, F, and initialize\n Omega = np.array([[-3/2,3/4],[0,1],[0,1/2],[0,1]])\n f = lambda x: np.exp(np.sum(-x**2/2,axis=0))/(2*np.pi)**(2)\n means, cov = np.zeros(4), np.eye(4)\n truth = scipy.stats.mvn.mvnun(list(Omega[:,0]),list(Omega[:,1]), means, cov)[0]\n domain = np.logspace(1,5,20)\n approxs = []\n error = []\n for N in domain:\n #calculate approx for various sizes of samples\n approx = mc_integrate(f,Omega[:,0],Omega[:,1],N)\n approxs.append(approx)\n #calculate relative err.\n error.append(np.abs((truth-approx)/truth))\n #PLOT it all\n plt.title(\"Error vs Sample Size\")\n plt.plot(domain,1/np.sqrt(domain),label = \"1/sqrt(N)\")\n plt.plot(domain,error,label = \"Error\")\n plt.loglog()\n plt.xlabel(\"N\")\n plt.ylabel(\"Relative Error\")\n plt.legend()\n plt.show()", "def gene_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_genes_filename: 'output file for gene-level results, use .csv',\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculated\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n snp_thr: \"threshold for the minimum number of SNPs in a gene\" = 10,\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'specify the model for the regression, one betwenn normal/gamma' = 'normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'gene level regression, model: %s' %model,\n [input_snp_filename],\n [output_genes_filename,output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores, 'SNP threshold': snp_thr})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n logging.info(\"Reading SNP file: %s,\\n\\t with %s delimiter\"%(input_snp_filename, sep))\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\n\\t Number of SNPs: %s\\n\\t Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. 
Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Creates the genes table with the number of SNPs for each gene and the basic stats values\n genes=g.Genes()\n genes.initialise_genes(snps.table.copy(), snps_thr=snp_thr)\n\n output_logger.info(\"Output gene table initialised:\\nNumber of genes: %s\\n\" \\\n %(str(genes.n_genes)) )\n\n snps.set_non_annotated(genes.cut_genes, 'NonCoding')\n\n if model == 'gamma':\n result = gr.analyse_gamma(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n else:\n result = gr.analyse_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n\n logging.info(\"Saving genes table\")\n genes.table = genes.table.merge(\n result, left_index=False, left_on=\"name\", right_on=\"name\")\n\n k = genes.table.n_snps / float(N_1kG)\n genes.table[\"h2g\"] = genes.table.bg_mean.astype(\"float\") * k\n\n genes.table = genes.table.sort_values(by=[\"P\", \"bg_median\"])\n\n genes.save_table(output_genes_filename)\n\n non_coding = genes.table[genes.table.name == \"NonCoding\"]\n h2g_tot = np.sum(genes.table[\"h2g\"].values) - non_coding[\"h2g\"].values\n\n output_logger.info(\" Non coding heritability : \" +\n str(non_coding[\"h2g\"].values) + \"\\n\")\n output_logger.info(\" Coding heritability : \" + str(h2g_tot) + \"\\n\")", "def plot_likelihood(expected_posts_per_month, probability):\n bar_amount = max(10, int(5 * expected_posts_per_month * probability)) # at least 10 bars, not too long of a tail\n print(\"Generating likelihood plot\")\n distribution = [binom.pmf(option, expected_posts_per_month, probability) for option in range(bar_amount)]\n plt.bar(range(bar_amount), distribution)\n plt.xlabel(\"occurrences\")\n plt.ylabel(\"likelihood\")\n plt.title(\"Likelihood of word occurences next month\")\n plt.show()", "def annotate_ISM(data_df, REFERENCE, position_list, reference_genbank_name=\"data/covid-19-genbank.gb\"):\n seq_list = data_df['sequence'].values.tolist()\n \n seq_index = []\n index = 0\n for base in REFERENCE[1]:\n if base == '-':\n seq_index.append(index)\n else:\n index += 1\n seq_index.append(index)\n reference_local_index_map = np.array(seq_index)\n mapped_reference_index = []\n for index, entropy in position_list:\n mapped_reference_index.append((index, reference_local_index_map[index], entropy))\n REFERENCE_ISM = ''.join([REFERENCE[1][item[0]] for item in position_list])\n logging.info('Reference ISM: {}.'.format(REFERENCE_ISM))\n \n gene_dict = load_gene_dict(reference_genbank_name)\n reference_raw = REFERENCE[1].replace('-', '')\n res = OrderedDict()\n res['Ref position'] = []\n res['Entropy'] = []\n res['Gene'] = []\n res['Is silent'] = []\n res['AA position'] = []\n for align_index, ref_index, entropy in mapped_reference_index:\n codon, codon_idx, name, codon_pos = find_SNP(ref_index, gene_dict, reference_raw)\n base_freq = Counter([item[align_index] for item in seq_list]).most_common()\n for alt_base, count in base_freq:\n if alt_base != reference_raw[ref_index-1]:\n break\n if codon is None:\n if_silence = True\n else:\n alt_codon = list(codon)\n alt_codon[codon_idx] = alt_base\n alt_codon = ''.join(alt_codon)\n ref_aa = translate(codon)\n ism_aa = translate(alt_codon)\n if ref_aa == ism_aa:\n if_silence = True\n else:\n if_silence = False\n res['Ref position'].append(ref_index)\n res['Entropy'].append(entropy)\n if name is None:\n name = 'Non-coding'\n res['Gene'].append(name)\n 
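# a variant is silent when the alternate codon encodes the same amino acid (non-coding sites are also treated as silent)\n        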
res['Is silent'].append(if_silence)\n if codon_pos is None:\n res['AA position'].append('NaN')\n else:\n res['AA position'].append('{}{}{}'.format(ref_aa, codon_pos, ism_aa))\n annotation_df = pd.DataFrame.from_dict(res)\n return annotation_df", "def gw_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculates\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'regression model'='normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'genome-wide regression, model: %s' %model,\n [input_snp_filename],\n [output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\nNumber of SNPs: %s\\nNumber of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. 
Number of SNPs: %s\n, Number of genes: %s\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n\n    if model == 'normal':\n        [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n            sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n    elif model == 'gamma':\n        # note: this branch currently also calls the normal regression; no separate gamma genome-wide model is invoked\n        [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n            sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n    else:\n        logging.info('Normal model by default')\n        [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n            sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n    logging.info(\"Analysis complete\")", "def log_likelihood_grad_rew(self, data, reward_model, bias_params):", "def gene_finder(dna, threshold):\n\n    # YOUR IMPLEMENTATION HERE", "def process_observations(message, agent):\n\n    if not message:\n        print(\"Message is empty\")\n        # return None\n    else:\n        # # Check if joint values are in the expected order and size.\n        if message.joint_names != agent['joint_order']:\n            # Check that the message is of same size as the expected message.\n            if len(message.joint_names) != len(agent['joint_order']):\n                raise MSG_INVALID_JOINT_NAMES_DIFFER\n\n            # Check that all the expected joint values are present in a message.\n            # (reconstructed: the second sequence of this pairwise membership test was missing in the source)\n            if not all(map(lambda x, y: x in y, message.joint_names,\n                           [agent['joint_order']] * len(message.joint_names))):\n                print(\"Joints differ\")\n                raise MSG_INVALID_JOINT_NAMES_DIFFER\n        return np.array(message.actual.positions) # + message.actual.velocities\n\ndef get_jacobians(state, scara_chain, jac_solver):\n    \"\"\"\n    Produce a Jacobian from the urdf that maps from joint angles to x, y, z.\n    This makes a 6x6 matrix from 6 joint angles to x, y, z and 3 angles.\n    The angles are roll, pitch, and yaw (not Euler angles) and are not needed.\n    Returns a repackaged Jacobian that is 3x6.\n    \"\"\"\n    # Initialize a Jacobian for scara_chain.getNrOfJoints() joint angles by 3 cartesian coords and 3 orientation angles\n    jacobian = Jacobian(scara_chain.getNrOfJoints())\n    # Initialize a joint array for the present self.scara_chain.getNrOfJoints() joint angles.\n    angles = JntArray(scara_chain.getNrOfJoints())\n    # Construct the joint array from the most recent joint angles.\n    for i in range(scara_chain.getNrOfJoints()):\n        angles[i] = state[i]\n    # Update the jacobian by solving for the given angles.observation_callback\n    jac_solver.JntToJac(angles, jacobian)\n    # Initialize a numpy array to store the Jacobian.\n    J = np.array([[jacobian[i, j] for j in range(jacobian.columns())] for i in range(jacobian.rows())])\n    # Only want the cartesian position, not Roll, Pitch, Yaw (RPY) Angles\n    ee_jacobians = J\n    return ee_jacobians", "def compute_probabilities():\n    global total_spam_words, total_ham_words\n    total_words = total_spam_words+total_ham_words\n    unique_words = len(all_dict)\n    print(\"Training Set Description: \")\n    len_ham = len(ham_file_list)\n    len_spam = len(spam_file_list)\n    print(\"SPAM EMAILS: \",len_spam)\n    print(\"HAM EMAILS: \",len_ham)\n    print(\"Total words: \",total_words)\n    print(\"Training...\")\n    \n    spam_probability = math.log((len_spam)/(len_spam+len_ham))\n    ham_probability = math.log((len_ham)/(len_spam+len_ham))\n    \n    \n    \n    output_file = open(\"nbmodel.txt\", \"w+\", encoding=\"latin-1\")\n    output_file.write(\"model_params \"+str(spam_probability)+\" \"+str(ham_probability)+\"\\n\")\n    \n    nbmodel = {}\n    nbmodel[\"model_params\"] = (spam_probability,ham_probability)\n    for word in all_dict.keys():\n        spam_count = 1\n        if word in spam_dict:\n            spam_count+= spam_dict[word]\n        \n        
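# Laplace (add-one) smoothing: each word count starts at 1 and the vocabulary size is added to the denominator, so unseen words never get zero probability\n        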
word_spam_probability = math.log(spam_count / (total_spam_words+unique_words))\n \n ham_count = 1\n if word in ham_dict:\n ham_count+= ham_dict[word]\n \n word_ham_probability = math.log(ham_count / (total_ham_words+unique_words))\n \n output_file.write(word+\" \"+str(word_spam_probability)+\" \"+str(word_ham_probability)+\"\\n\")\n nbmodel[word] = (word_spam_probability, word_ham_probability) \n \n print(\"nbmodel.txt generated successfully...\")\n print(\"SPAM Probability: \",spam_probability)\n print(\"HAM Probability: \",ham_probability)\n output_file.close()", "def compute_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n probabilities[possible_target_location] = integrate.quad(\n integral_function,\n -np.inf, np.inf,\n args=(possible_target_location,Dprime_map[fixation]),\n epsabs=0,\n limit=100,\n full_output=1\n )[0] #MOD Dprime_map deleted\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def giveMotevoParamFile(genome, wmlen, inter_dir, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior, bgorder, bgprior):\n\n ##UFE_models from genome_dict are not used anymore\n #UFEmodel_hg19 is UFE model for mammal species\n genome_dict = {}\n genome_dict['hg19'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau6:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_hg19']\n genome_dict['hg18'] = ['((((hg18:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau3:0.186713,(equCab1:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom4:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFE_mammals']\n #genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_dm3']\n genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/dm3UFEparallel/UFEmodel_dm3']\n genome_dict['mm9'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau7:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_mm9']\n\n\n sitefilepath = os.path.join(inter_dir, 'sites_' + tag)\n priorfilepath = os.path.join(inter_dir, 'priors_' + tag)\n loglikfile = os.path.join(inter_dir, 'loglik_' + tag)\n\n\n print '\\nCreate motevo parameter file %s' %tag\n print 'aligned', aligned\n if aligned:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE %s' %genome_dict[genome][0],\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 
'priordiff %s' %0.05,\n 'UFEwmprior %s' %200,\n 'UFEwmfile %s' %ufemodel_path,\n 'UFEwmlen %s' %wmlen,\n 'UFEprint %s' %0,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile])\n else:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE (%s: 1)' %genome,\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile]) \n\n params_path = os.path.join(inter_dir, 'motevo_TFBS_params_' + tag)\n pf = open(params_path, 'w')\n pf.write(motevo_params)\n return (params_path, sitefilepath, priorfilepath, loglikfile)", "def log_prob_of_file(filepath, model):\n vocab = set(counts_un.keys())\n tot = 0\n count = 0\n prev_prev = \"<s>\\n\"\n prev = \"<s>\\n\"\n with open(filepath) as f:\n for line in f:\n count += 2\n line = line.strip()+\"\\n\"\n tri_prob = model.get_trigram_prob(prev_prev, prev, line)\n tot += math.log(tri_prob)\n prev_prev = prev\n prev = line \n for line in [\"</s>\\n\", \"</s>\\n\"]:\n tri_prob = model.get_trigram_prob(prev_prev, prev, line)\n tot += math.log(tri_prob)\n prev_prev = prev\n prev = line \n return tot, count", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, 
:].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def backward(log_emlik, log_startprob, log_transmat):\n N, M = log_emlik.shape\n backward_prob = np.zeros((N,M))\n\n backward_prob[N-1, :] = 0.0\n\n for i in range(N-2,-1,-1):\n for k in range(M):\n # probability of transitioning from k to state l * probability of emitting symbol at state l at ts i+1 * recursive backward probability\n backward_prob[i,k] = logsumexp(log_transmat[k,:] + log_emlik[i+1,:] + backward_prob[i+1,:])\n\n return backward_prob", "def sentence_logprob(self, sentence):\n grams = get_ngrams(sentence, 3)\n p = 1\n\n for gram in grams:\n p *= np.longfloat(self.smoothed_trigram_probability(gram))\n\n return np.log2(p)", "def dna_probability(dna:str, gc:float, return_log=False) -> float:\n at = (1 - gc) / 2.0\n gc /= 2.0\n\n p = 1\n for l in dna:\n if l in \"AT\":\n p *= at\n elif l in \"CG\":\n p *= gc\n else:\n raise ValueError(\"You should use dna string.\")\n if return_log:\n return math.log(p, 10)\n else:\n return p", "def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)", "def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, 
G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))", "def return_likelihood(self, params):\n\n self._y_pred = self._int_spec(self._x, **params) * self._atten\n\n # Add the cascade\n if self._casc is not None:\n params_casc = deepcopy(params)\n\n # apply the weights\n if self._cr_spec is not None:\n # add units to the parameters where neccessary\n params_casc['Prefactor_CR'] *= u.Unit(\"TeV-1 cm-2 s-1\")\n params_casc['Scale_CR'] *= u.Unit(\"eV\").to(\"eV\") * u.eV\n params_casc['Emin_CR'] *= u.Unit(\"eV\").to(\"eV\") * u.eV\n params_casc['Emax_CR'] *= u.Unit(\"eV\").to(\"eV\") * u.eV\n self._casc.apply_spectral_weights(lambda x: self._cr_spec(x, **params_casc),\n smooth=True)\n\n else:\n # add units to the parameters where neccessary\n params_casc['Prefactor'] *= u.Unit(\"TeV-1 cm-2 s-1\")\n params_casc['Scale'] *= u.Unit(\"TeV\").to(\"eV\") * u.eV\n params_casc['Cutoff'] *= u.Unit(\"TeV\").to(\"eV\") * u.eV\n self._casc.apply_spectral_weights(lambda x: self._int_spec(x, **params_casc),\n smooth=True)\n\n # and get the flux in the ON region\n spec_halo = self._casc.get_obs_spectrum(\n region=self._on_region\n )\n # convert the units back\n flux_unit_conversion = (spec_halo.quantity.unit).to(\"TeV-1 cm-2 s-1\")\n\n # either add directly if energy bins are the same or use 1D interpolation\n if self._interp_casc:\n m = spec_halo.data[:, 0, 0] > 0.\n if not np.sum(m):\n raise ValueError(\"Predicted cascade flux is zero!\")\n interp = interp1d(np.log(spec_halo.geom.get_axis_by_name('energy').center.to(\"TeV\").value[m]),\n np.log(spec_halo.data[:, 0, 0][m] * flux_unit_conversion),\n fill_value='extrapolate', kind='cubic'\n )\n self._y_pred += np.exp(interp(np.log(self._x)))\n\n else:\n self._y_pred += spec_halo.data[:, 0, 0] * flux_unit_conversion\n\n if self._cov_inv is None:\n self._llh = -1. * ((self._y - self._y_pred) ** 2. / self._dy ** 2.).sum()\n else:\n self._llh = -1. * np.dot(self._y - self._y_pred, np.dot(self._cov_inv, self._y - self._y_pred))\n\n # add contribution from profile likelihood\n if self._llh_fermi_interp is not None:\n # change parameters to the values over which grid was interpolated\n params_llh = deepcopy(params)\n params_llh['Prefactor'] *= u.Unit(\"TeV-1 cm-2 s-1\").to(\"MeV-1 cm-2 s-1\")\n params_llh['Index'] *= -1.\n self._llh_fermi = 2. * self._llh_fermi_interp([params_llh['Cutoff'],\n -1. * params_llh['Index'],\n np.log10(params_llh['Prefactor'])])[0]\n else:\n self._llh_fermi = 0\n\n return -1. 
* (self._llh + self._llh_fermi)", "def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])", "def test_relevance_with_itself():\n state = gen_state_cgpm(get_data_separated)\n assert np.allclose(state.relevance_probability(2, [2], 1), 1.0)", "def p(self) -> Probability:\n ...", "def _trajectory_prob(self, trajectory):\n\n # Accumulate probabilities in log space for numerical stability\n logprob = np.log(self._starting_prob(trajectory[0][0]))\n logprob += np.sum([\n np.log(self._transition_prob(s1, a, s2))\n for (s1, a), (s2, _) in zip(trajectory[:-1], trajectory[1:])\n ])\n\n return np.exp(logprob)", "def log_probability(self, sequence):\n sequence = self._transform(sequence)\n\n T = len(sequence)\n\n if T > 0 and sequence[0][_TAG]:\n last_state = sequence[0][_TAG]\n p = self._priors.logprob(last_state) + self._output_logprob(\n last_state, sequence[0][_TEXT]\n )\n for t in range(1, T):\n state = sequence[t][_TAG]\n p += self._transitions[last_state].logprob(\n state\n ) + self._output_logprob(state, sequence[t][_TEXT])\n last_state = state\n return p\n else:\n alpha = self._forward_probability(sequence)\n p = logsumexp2(alpha[T - 1])\n return p", "def _create_log_likelihood(self, individual):\n # Get individuals data\n times = []\n observations = []\n mask = self._data[self._id_key] == individual\n data = self._data[mask][\n [self._time_key, self._obs_key, self._value_key]]\n for output in self._mechanistic_model.outputs():\n # Mask data for observable\n observable = self._output_observable_dict[output]\n mask = data[self._obs_key] == observable\n temp_df = data[mask]\n\n # Filter times and observations for non-NaN entries\n mask = temp_df[self._value_key].notnull()\n temp_df = temp_df[[self._time_key, self._value_key]][mask]\n mask = temp_df[self._time_key].notnull()\n temp_df = temp_df[mask]\n\n # Collect data for output\n times.append(temp_df[self._time_key].to_numpy())\n observations.append(temp_df[self._value_key].to_numpy())\n\n # # Count outputs that were measured\n # # TODO: copy mechanistic model and update model outputs.\n # # (Useful for e.g. 
control group and dose group training)\n # n_measured_outputs = 0\n # for output_measurements in observations:\n # if len(output_measurements) > 0:\n # n_measured_outputs += 1\n\n # Create log-likelihood and set ID to individual\n log_likelihood = chi.LogLikelihood(\n self._mechanistic_model, self._error_models, observations, times)\n log_likelihood.set_id(individual)\n\n return log_likelihood", "def probabilities_of_structures(sequence, structure_list, react=None):\n ensemble_energy = get_ens_energy(sequence, react = react)\n energies = [get_stru_energy(x, sequence, react = react) for x in structure_list]\n probabilities = [energy_to_proba(ensemble_energy, x) for x in energies]\n #probabilities = normalize(probabilities, norm='l1').tolist()[0]\n return [(stru,proba) for stru,proba in zip(structure_list,probabilities)]", "def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)", "def _logprob(self, sample):\n return 0, 0", "def log_probability(self, samples):\n pass", "def calc_likelihood(par_num, par_rng):\n\n likelihoods = np.zeros(np.size(par_rng))\n\n trivial_prior = trivial_prior_class()\n\n pipe = pipeline(observables_generator=hammu12,\n likelihood=likelihood,\n prior=trivial_prior,\n optimizer_class=Hamiltonian_Monte_Carlo)\n\n parameters = [0]*hammu12.get_parameter_dimension()\n for par_val in par_rng:\n parameters[par_num] = par_val\n likelihoods[par_val-par_rng[0]] = pipe._calc_posterior(parameters)\n\n np.save('data%s_RM' % (par_num), likelihoods)", "def compute_joint_probability(token_list, token_probabilities, use_log_prob=False):\n\n log_prob = 0\n\n for word in token_list:\n\n # do not allow zero probabilites\n assert word in token_probabilities\n\n if use_log_prob:\n log_prob += token_probabilities[word]\n else:\n log_prob += log10(token_probabilities[word])\n\n if use_log_prob:\n return log_prob\n\n return 10**log_prob", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables 
which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def random_strings(sequence, GC_array):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in sequence:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n probabilities = []\r\n\r\n #Calculate probability of G = probability of C = %GC / 2\r\n #Calculate probability of A = probability of T = (1 - %GC) / 2\r\n\r\n #For each consecutive base in provided sequence:\r\n #1. Convert total probability to logarithm using math.log(probability, base=10)\r\n #2. Total probability to be multiplied by probability of specifically that base\r\n\r\n for i in range(len(GC_array)):\r\n prob = (AT * math.log10((1 - GC_array[i])/2)) + (GC * math.log10(GC_array[i]/2))\r\n\r\n probabilities.append('%0.3f' % prob)\r\n\r\n print(*probabilities, sep= \" \")", "def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - self.design_matrix@self.mn\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*self.mn.T@self.mn\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik", "def Log_OB_S1(xref,x):\n\n nX = np.shape(x)\n\n m = nX[0]\n n = nX[1]\n t = nX[2]\n\n G = np.zeros((n,t))\n Gv = np.zeros((m,n,t))\n\n for r in range(t):\n\n # Correct for permuations\n\n #Xout,PiA= CorrectPerm(xref,x[:,:,r])\n Xout = dp(x[:,:,r])\n\n for q in range(n):\n\n a = np.sum(Xout[:,q]*xref[:,q])/np.sqrt(np.sum(xref[:,q]**2)*np.sum(Xout[:,q]**2)) # Should have unit L2 norm\n if a > 1:\n a = 1\n if a < -1:\n a = -1\n G[q,r] = np.arccos(a) # Computing the angles\n v = Xout[:,q] - a*xref[:,q]\n Gv[:,q,r] = v / (1e-12 + np.linalg.norm(v)) # Unit vector in the tangent subspace\n\n return G,Gv", "def _log_likelihood_poisson(self, df, dfo, n_bins=10):\n cond = df[\"selected_jig\"].values == 1\n range = parameter_ranges['uae'], parameter_ranges['rec']\n\n uae_obs = dfo[\"mueff_av\"].values\n rec_obs = dfo[\"rec_arcsec\"].values\n obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=range, bins=n_bins)\n\n uae_mod = df[\"uae_obs_jig\"].values[cond]\n rec_mod = df[\"rec_obs_jig\"].values[cond]\n model, _, _ = np.histogram2d(uae_mod, rec_mod, range=range, 
bins=n_bins, density=True)\n\n # Rescale model by number of observations\n model = model.astype(\"float\") * dfo.shape[0]\n\n # Calculate Poisson probability for each bin\n obs = obs.reshape(-1).astype(\"float\")\n model = model.reshape(-1)\n probs = stats.poisson(mu=model).pmf(obs)\n\n # Return overall log likelihood\n return np.log(probs).sum()", "def GoAnnot(prots, gos, onlyProts=False):\r\n with resources.open_text(\"autoprot.data\",\"Homo_sapiens.gene_info\") as d:\r\n geneInfo = pd.read_csv(d, sep='\\t')\r\n with resources.open_text(\"autoprot.data\",\"gene2go_alt\") as d:\r\n gene2go = pd.read_csv(d, sep='\\t')\r\n prots = pd.DataFrame(pd.Series([str(i).upper().split(';')[0] for i in prots]), columns=[\"Gene names\"])\r\n prots = prots.merge(geneInfo[[\"Symbol\", \"GeneID\"]], left_on=\"Gene names\", right_on=\"Symbol\", how='inner')\r\n \r\n prots = prots.merge(gene2go[[\"GeneID\", \"GO_ID\", \"GO_term\"]], on=\"GeneID\", how='inner')\r\n if onlyProts == True:\r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)]\r\n else:\r\n redProts = redProts.append(prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)])\r\n return redProts.drop_duplicates()\r\n else: \r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[prots[\"GO_term\"]==go]\r\n else:\r\n redProts = redProts.append(prots[prots[\"GO_term\"]==go])\r\n return redProts.drop_duplicates()", "def calculate_likelihood_probability(measurement, predicted_measurement, covariance):\n \n return None", "def compute_correspondence_likelihoods(self, measurement,\r\n number_of_landmarks,\r\n Qt_measurement_covariance):\r\n likelihoods = []\r\n for i in range(number_of_landmarks):\r\n likelihoods.append(\r\n self.compute_weight(measurement, i, Qt_measurement_covariance))\r\n \r\n return likelihoods", "def label_propagate_probabilistic(embeddings, seeds_map, normalize=True, **kwargs):\n words = embeddings.iw\n M = transition_matrix(embeddings, **kwargs)\n teleport_set_map={}\n for seed_key, seed_list in seeds_map.items():\n teleport_set_map[seed_key]=teleport_set(words, seed_list)\n def update_seeds(r):\n idm= np.eye(len(seeds_map))\n for seed_key, w_indices in teleport_set_map.items():\n r[w_indices] = idm[seed_key]\n r /= np.sum(r, axis=1)[:, np.newaxis]\n r = run_iterative(M, np.random.random((M.shape[0], len(seeds_map))), update_seeds, **kwargs)\n polarities={}\n for i, w in enumerate(words):\n polarities[w]=Counter()\n for seed_key in seeds_map:\n polarities[w][seed_key]=r[i][seed_key]\n if normalize:\n polarities[w]=normalize_counter(polarities[w])\n return polarities", "def build_mm_df(sralist):\n\n def convert_to_codon(nts_array):\n \"\"\"\n pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.\n This function converts nucleotide arrays to codon length (nts to codon resolution):\n \"\"\"\n \n nts_array = np.array(nts_array)\n codon_array = np.sum( np.reshape(A, (int(np.floor(nts_array[1]/3)),3) ), 1)/3.\n\n return codon_array\n\n\n def compute_mm(mmdata):\n \"\"\"\n get per gene average multi-mapping score\n \"\"\"\n\n mm_df = pd.DataFrame(columns=['ORF', 'MM'])\n counter = 0\n\n for gene in mmdata.keys():\n current_matrix = mmdata[gene]\n current_avrg = np.mean( np.sum(current_matrix, 1) / current_matrix.shape[1] )\n mm_df.loc[counter] = [gene, current_avrg]\n counter += 1\n\n return mm_df\n\n\n mm_mat = {}\n mm_pct = {}\n\n N = len(sralist)\n\n for ix, dataset in enumerate(sralist):\n samfile = 
pysam.AlignmentFile(TMP_DIR+'/ambiguous_reads/'+dataset+'_STAR_transcriptome_multi_mapped_sorted.bam', 'rb')\n genes_list = list(samfile.references)\n print(ix, dataset)\n\n for geneID in genes_list:\n # count the coverage of genomic positions by reads in region.\n # Returns: four array.arrays of the same length in order A C G T\n # The coverage is computed per-base [ACGT]\n cov = samfile.count_coverage(geneID, read_callback='nofilter')\n # Summ all 4 arrays\n cov_sum = np.sum(cov, axis=0)\n #print(geneID, cov_sum)\n codon_cov = convert_to_codon(cov_sum)\n codon_bool = np.asarray([1 if i > 0 else 0 for i in codon_cov])\n \n M = len(codon_bool)\n\n if ix == 0:\n \tmm_mat[geneID] = np.zeros((N,M)) * np.nan\n \n current_matrix = mm_mat[geneID]\n current_matrix[ix,:] = np.copy(codon_bool)\n mm_mat[geneID] = current_matrix\n\n\n mm_avrg = compute_mm(mm_mat)\n #mm_avrg.to_json('yeast_mm.json')\n #mm_avrg.to_csv('yeast_mm.txt', header=True, index=False, sep='\\t')\n\n \n mm_profile = {}\n theta_mm = 5\n for orf in mm_mat.keys():\n current_mat = mm_mat[orf]\n current_bool = np.sum(current_mat, 0) <= theta_mm\n mm_profile[orf] = current_bool\n\n with open('../data/processed/mm_consensus.pkl', 'wb') as f_mm:\n pickle.dump(mm_profile, f_mm)\n\n\n return mm_mat, mm_avrg, mm_profile", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def ref_lamanno(\n fasta_path,\n gtf_path,\n cdna_path,\n intron_path,\n index_path,\n t2g_path,\n cdna_t2c_path,\n intron_t2c_path,\n temp_dir='tmp',\n overwrite=False,\n):\n results = {}\n if not os.path.exists(index_path) or overwrite:\n fasta_path = decompress_file(fasta_path, temp_dir=temp_dir)\n sorted_fasta_path, fasta_chromosomes = sort_fasta(\n fasta_path, os.path.join(temp_dir, SORTED_FASTA_FILENAME)\n )\n gtf_path = decompress_file(gtf_path, temp_dir=temp_dir)\n sorted_gtf_path, gtf_chromosomes = sort_gtf(\n gtf_path, os.path.join(temp_dir, SORTED_GTF_FILENAME)\n )\n logger.info('Splitting genome into cDNA at {}'.format(cdna_path))\n chromosomes = check_chromosomes(fasta_chromosomes, gtf_chromosomes)\n cdna_fasta_path = generate_cdna_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n cdna_path,\n chromosomes=chromosomes\n )\n results.update({'cdna_fasta': cdna_fasta_path})\n logger.info(\n 'Creating cDNA transcripts-to-capture at {}'.format(cdna_t2c_path)\n )\n cdna_t2c_result = create_t2c(cdna_fasta_path, cdna_t2c_path)\n results.update({'cdna_t2c': cdna_t2c_result['t2c']})\n logger.info('Splitting genome into introns at {}'.format(intron_path))\n intron_fasta_path = generate_intron_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n intron_path,\n chromosomes=chromosomes\n )\n results.update({'intron_fasta': intron_fasta_path})\n logger.info(\n 'Creating intron transcripts-to-capture at {}'.\n format(cdna_t2c_path)\n )\n intron_t2c_result = create_t2c(intron_fasta_path, intron_t2c_path)\n results.update({'intron_t2c': intron_t2c_result['t2c']})\n logger.info('Concatenating cDNA and intron FASTAs')\n combined_path = concatenate_files(\n cdna_fasta_path,\n intron_fasta_path,\n out_path=os.path.join(temp_dir, COMBINED_FILENAME),\n temp_dir=temp_dir\n )\n t2g_result = create_t2g_from_fasta(combined_path, t2g_path)\n results.update(t2g_result)\n index_result = kallisto_index(combined_path, index_path)\n results.update(index_result)\n else:\n logger.info(\n 'Skipping kallisto index because {} already exists. 
Use the --overwrite flag to overwrite.'\n .format(index_path)\n )\n\n return results", "def calories_protein(og, fg):\n\n return 0.994 * fg * real_extract(og, fg)", "def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood" ]
[ "0.64158255", "0.61762154", "0.6171898", "0.61255485", "0.6108559", "0.60751355", "0.5888883", "0.581115", "0.57994837", "0.5773769", "0.57539636", "0.5743196", "0.57412934", "0.57062215", "0.5637121", "0.5604803", "0.56041193", "0.55022335", "0.5498393", "0.543617", "0.53924245", "0.5376682", "0.5366281", "0.53575003", "0.5351058", "0.5326567", "0.53247386", "0.532451", "0.5315191", "0.53046423", "0.53007615", "0.52895355", "0.5286242", "0.5279394", "0.5272649", "0.5256475", "0.52495486", "0.5248279", "0.5241967", "0.523703", "0.52352494", "0.5231489", "0.5223748", "0.522235", "0.52157533", "0.52153933", "0.5211854", "0.52102554", "0.52069914", "0.52068955", "0.52005404", "0.51990205", "0.5191093", "0.5184872", "0.5178004", "0.515485", "0.5150197", "0.5142383", "0.51392275", "0.5138804", "0.5128439", "0.51066357", "0.51057214", "0.5103622", "0.5102116", "0.5100729", "0.5100422", "0.5100422", "0.5098312", "0.50956213", "0.50889736", "0.5086658", "0.50860214", "0.50852585", "0.50837", "0.5080683", "0.5075263", "0.5074123", "0.50736403", "0.50516945", "0.5051418", "0.5050595", "0.50469905", "0.5041392", "0.50409913", "0.50378263", "0.5036027", "0.50352263", "0.50314987", "0.5027727", "0.50216466", "0.50213647", "0.50196993", "0.5014496", "0.50070405", "0.49968448", "0.49965647", "0.49908167", "0.49891657", "0.49828002" ]
0.6003137
6
Gapfill a model using probabilistic weights
def probabilistic_gapfill(model, universal_model, reaction_probabilities, clean_exchange_rxns=True, default_penalties=None, dm_rxns=False, ex_rxns=False, **solver_parameters):
    universal_model = universal_model.copy()
    model = clean_exchange_reactions(model) if clean_exchange_rxns else model.copy()
    if default_penalties is None:
        default_penalties = {'Universal': 1, 'Exchange': 100, 'Demand': 1, 'Reverse': 75}
    penalties = default_penalties
    reactions_to_remove = []
    for r in universal_model.reactions:
        if model.reactions.has_id(r.id):
            reactions_to_remove.append(r)
            penalties[r.id] = 0  # In the model
        elif r.id in reaction_probabilities:
            penalties[r.id] = max(0, 1 - reaction_probabilities[r.id]) * (penalties[r.id] if r.id in penalties else 1)
    universal_model.remove_reactions(reactions_to_remove)
    return cobra.flux_analysis.gapfill(model, universal_model, penalties=penalties, demand_reactions=dm_rxns, exchange_reactions=ex_rxns, **solver_parameters)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_weights(model):\n ...", "def gap2d(_w_in):\n return nn.AdaptiveAvgPool2d((1, 1))", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)", "def rebalance_weightings(context):\r\n total_ratio = 0\r\n log.info(\"*******Rebalancing weightings********\")\r\n print(context.up_ratios)\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n total_ratio += ratio\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n context.max_weights[asset] = ratio/total_ratio\r\n \r\n log.info(context.max_weights)", "def balance_training_weight(w, y):\n sample_weight = w.copy()\n neg_mask = (y == 0)\n pos_mask = (y == 1)\n \n bkg_sum_weight = np.sum(sample_weight[neg_mask])\n sig_sum_weight = np.sum(sample_weight[pos_mask])\n\n sample_weight[pos_mask] = sample_weight[pos_mask] / sig_sum_weight\n sample_weight[neg_mask] = sample_weight[neg_mask] / bkg_sum_weight\n return sample_weight", "def fillGap(self, X, y, T, knn):\n knnobj = neighbors.KNeighborsRegressor(knn)\n return knnobj.fit(X, y).predict(T)", "def bias_prior(self):", "def get_nml_probs(x, model, data=None, normalize=True, num_classes=2, query_point_weight=20, dist_weight_thresh=None, \n num_grad_steps=10, lr=0.01, batch_size=32, grad_penalty=None, verbose=False, \n show_plots=False, plotting_2d=False, return_params=False):\n results = []\n data = data or DEFAULT_DATA\n orig_inputs, orig_targets = data\n \n if show_plots and plotting_2d:\n plt.figure()\n plt.title(f\"Original rewards\")\n plot_rewards(model, contours=True)\n plot_dataset(data)\n \n marker_for_class = {\n 0: 'x',\n 1: '*'\n }\n \n model.cuda()\n num_batches = ceil(len(orig_inputs) / batch_size)\n\n # NOTE train on gpu, move back to cpu for eval\n \n for proposed_class in range(num_classes):\n new_model = copy.deepcopy(model)\n new_model.cuda()\n \n # Sample all of the adaptation batches in advance\n optimizer = optim.SGD(new_model.parameters(), lr=lr)\n \n for _ in range(num_grad_steps):\n idxs = np.random.permutation(range(len(orig_inputs)))[:batch_size-1]\n X, y = orig_inputs[idxs], orig_targets[idxs]\n X = torch.Tensor(np.vstack((X, x))).cuda()\n y = torch.Tensor(np.hstack((y, proposed_class))).long().cuda()\n \n logits = new_model(X)\n loss = F.cross_entropy(logits, y, reduction='none')\n \n if dist_weight_thresh:\n weights = np.exp(-np.linalg.norm(x - X.cpu().numpy(), axis=-1) * 2.3 / dist_weight_thresh)\n else:\n weights = np.ones(len(y))\n \n weights[-1] *= query_point_weight * 1. 
/ num_batches\n weights = torch.Tensor(weights).cuda()\n loss = torch.sum(loss * weights) / torch.sum(weights)\n \n loss.backward()\n optimizer.step()\n \n new_model.cpu()\n \n with torch.no_grad():\n x_tensor = torch.Tensor(x[None])\n probs = torch.softmax(new_model(x_tensor), -1)\n results.append(probs[0][proposed_class].item())\n \n if show_plots:\n new_model.to(torch.device(\"cpu\"))\n\n if plotting_2d: \n plt.figure()\n plot_rewards(new_model, contours=True, env = False, title=f\"Finetuning on label {proposed_class}\")\n plot_dataset(data)\n plt.scatter(x[0], x[1], marker=marker_for_class[proposed_class], color='w', s=100)\n \n plt.figure()\n plt.title(f\"Losses for label {proposed_class}\")\n plt.plot(losses)\n \n plt.figure()\n plt.title(f\"x loss for label {proposed_class}\")\n plt.plot(x_losses)\n \n plt.figure()\n plt.title(f\"x probs for label {proposed_class}\")\n plt.plot(x_vals)\n \n model.cpu()\n \n if normalize:\n results = np.array(results) / sum(results)\n else:\n results = np.array(results)\n return results if not return_params else (results, new_model)", "def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)", "def update_weights(self):\n\t\tpass", "def modify_weights_after_load(model):\n # Prune heads if needed\n if model.config.pruned_heads:\n model.prune_heads(model.config.pruned_heads)\n\n # Tie weights if needed\n model.tie_weights()", "def init_weights(self) -> None:\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def step(self, model):\n weights = []\n if (self.prune_scope == 'global'):\n # First collect all weights\n for step, (name, param) in enumerate(self.prune_parameters):\n scale = 1\n # Pointer to the original tensor\n tensor = param.data#.cpu().numpy()\n # Gradient-based selection\n if (self.prune_selection == 'gradient_max'):\n grad = param.grad#.cpu().numpy()\n tensor = grad[torch.nonzero(tensor, as_tuple=True)]#np.nonzero(tensor)]\n elif (self.prune_selection == 'gradient_min'):\n grad = 1 / (torch.abs(param.grad) + 1e-7) #.cpu().numpy()\n tensor = grad[torch.nonzero(tensor, as_tuple=True)]#np.nonzero(tensor)]\n #tensor = torch.abs(torch.max(tensor) - tensor) #np.max(tensor) - tensor\n # Retrieve non-pruned weights\n if (self.prune_scale == 'dimension'):\n scale = tensor.size\n if (self.prune_scale == 'normalize'):\n scale = torch.max(torch.abs(tensor))#np.max(np.abs(tensor))\n if (self.prune_scale == 'xavier'):\n scale = 1.0 / np.sqrt(2.0 / tensor.shape[0] + tensor.shape[1])\n alive = tensor[torch.nonzero(tensor, as_tuple=True)]#np.nonzero(tensor)]\n alive /= scale \n # Add to global weights\n weights.append(alive)\n # Flatten the whole weights\n weights = torch.cat(weights)#np.concatenate(weights)\n value = abs(weights)\n # Compute global percentile\n percentile_value = torch_percentile(value, self.percent) #np.percentile(value, self.percent)\n # Now apply the global or compute local factor\n for step, (name, param) in enumerate(self.prune_parameters):\n scale = 1\n # Pointer to the original tensor\n tensor = param.data #.cpu().numpy()\n # Gradient-based selection\n if (self.prune_selection == 'gradient_max'):\n tensor = param.grad#.cpu().numpy()\n elif (self.prune_selection == 'gradient_min'):\n tensor = 1.0 / (torch.abs(param.grad) + 1e-7)#np.abs(param.grad.cpu().numpy())\n #tensor = torch.abs(torch.max(tensor) - tensor) #np.max(tensor) - tensor\n # Compute scaling\n if (self.prune_scale == 'dimension'):\n scale = tensor.size\n if (self.prune_scale 
== 'normalize'):\n scale = torch.max(torch.abs(tensor)) #np.max(np.abs(tensor))\n if (self.prune_scale == 'xavier'):\n scale = 1.0 / np.sqrt(2.0 / tensor.shape[0] + tensor.shape[1])\n local_weights = tensor\n local_weights /= scale\n # We do not prune bias term\n if (self.prune_scope == 'local'):\n weights = tensor[torch.nonzero(param.data, as_tuple=True)] #tensor[np.nonzero(tensor)]\n # Retrieve non-pruned weights\n value = abs(weights)\n # Compute global percentile\n percentile_value = torch_percentile(value, self.percent)\n # Use the selection function to compute mask\n new_mask = PruningMasking.ranking(self, name, local_weights, percentile_value, self.mask[step])\n # Store the computed mask\n self.mask[step] = new_mask\n step = 0\n return model", "def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))", "def proba_redefined_predict(model,X,weigh):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,model.classes_)\n\n return predict", "def proba_redefined_predict(model,X,weigh,classes=string.ascii_lowercase):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,classes)\n \n return predict", "def train_gradient_boost(self, params, num_boost_round = 50):\n print \"training GB......\"\n dtrain = xgb.DMatrix(self.X, self.y)\n model = xgb.train(params, dtrain, num_boost_round = num_boost_round)\n self.models += [model]", "def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = 
returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def init_weights(self):\r\n self.embedding.weight.data.uniform_(-0.1, 0.1)\r\n self.fc.bias.data.fill_(0)\r\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def sparsify_model(model, x_test, y_test, k_sparsity, pruning='weight'):\r\n # Copying a temporary sparse model from our original\r\n sparse_model = model #tf.keras.models.clone_model(model)\r\n # sparse_model.set_weights(model.get_weights())\r\n\r\n # Getting a list 
of the names of each component (w + b) of each layer\r\n names = [weight.name for layer in sparse_model.layers for weight in layer.weights]\r\n # print(names)\r\n # Getting the list of the weights for each component (w + b) of each layer\r\n weights = sparse_model.get_weights()\r\n # print(weights)\r\n\r\n\r\n # Initializing list that will contain the new sparse weights\r\n newWeightList = []\r\n\r\n # Iterate over all but the final 2 layers (the softmax)\r\n for i in range(0, len(weights), 2):\r\n\r\n # print(weights[i])\r\n # print(weights[i+1])\r\n\r\n if pruning == 'weight':\r\n kernel_weights, bias_weights = weight_prune_dense_layer(weights[i],\r\n weights[i + 1],\r\n k_sparsity)\r\n elif pruning == 'unit':\r\n kernel_weights, bias_weights = unit_prune_dense_layer(weights[i],\r\n weights[i + 1],\r\n k_sparsity)\r\n else:\r\n print('does not match available pruning methods ( weight | unit )')\r\n\r\n # Append the new weight list with our sparsified kernel weights\r\n newWeightList.append(kernel_weights)\r\n\r\n # Append the new weight list with our sparsified bias weights\r\n newWeightList.append(bias_weights)\r\n\r\n # Adding the unchanged weights of the final 2 layers\r\n # for i in range(len(weights) - 2, len(weights)):\r\n # for i in range(len(weights) - 2, len(weights)):\r\n # unmodified_weight = np.copy(weights[i])\r\n # newWeightList.append(unmodified_weight)\r\n\r\n # Setting the weights of our model to the new ones\r\n sparse_model.set_weights(newWeightList)\r\n\r\n # Re-compiling the Keras model (necessary for using `evaluate()`)\r\n adam = Adam(lr=0.0004, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n sparse_model.compile(\r\n loss='mean_squared_error',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n # print((sparse_model.summary()))\r\n #\r\n # sparse_model.fit(np.expand_dims(x_test, axis=2), y_test,\r\n # batch_size=32, epochs=20, verbose=2, validation_split=0.2)\r\n #\r\n # print((sparse_model.summary()))\r\n\r\n # Printing the the associated loss & Accuracy for the k% sparsity\r\n # score = sparse_model.evaluate(np.expand_dims(x_test, axis=2), y_test, verbose=0)\r\n # print('k% weight sparsity: ', k_sparsity,\r\n # '\\tTest loss: {:07.5f}'.format(score[0]),\r\n # '\\tTest accuracy: {:05.2f} %%'.format(score[1] * 100.))\r\n\r\n\r\n return sparse_model, weights", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def weight_wrtg(self, wrtg):\n # Clear caches because weights are going to change.\n # TODO: it might be possible to not clear the caches\n # if the weight doesn't change, and re-use previous decoding.\n wrtg.ClearCaches()\n for p in wrtg.P:\n rule = p.rhs.rule\n assert isinstance(rule.features, list)\n rule.weight = self.weight_rule(rule)", "def _precompute_probabilities(self):\n\n d_graph = self.d_graph\n first_travel_done = set()\n\n nodes_generator = self.graph.nodes() if self.quiet \\\n else tqdm(self.graph.nodes(), desc='Computing transition probabilities')\n\n for source in nodes_generator:\n\n # Init probabilities dict for first travel\n if self.PROBABILITIES_KEY not in d_graph[source]:\n d_graph[source][self.PROBABILITIES_KEY] = dict()\n\n for current_node in self.graph.neighbors(source):\n\n # Init probabilities dict\n if self.PROBABILITIES_KEY not in d_graph[current_node]:\n d_graph[current_node][self.PROBABILITIES_KEY] = dict()\n\n unnormalized_weights = list()\n first_travel_weights = list()\n d_neighbors = 
list()\n\n # Calculate unnormalized weights\n for destination in self.graph.neighbors(current_node):\n\n p = self.sampling_strategy[current_node].get(self.P_KEY,\n self.p) if current_node in self.sampling_strategy else self.p\n q = self.sampling_strategy[current_node].get(self.Q_KEY,\n self.q) if current_node in self.sampling_strategy else self.q\n\n if destination == source: # Backwards probability\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / p\n elif destination in self.graph[source]: # If the neighbor is connected to the source\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1)\n else:\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / q\n\n # Assign the unnormalized sampling strategy weight, normalize during random walk\n unnormalized_weights.append(ss_weight)\n if current_node not in first_travel_done:\n first_travel_weights.append(self.graph[current_node][destination].get(self.weight_key, 1))\n d_neighbors.append(destination)\n\n # Normalize\n unnormalized_weights = np.array(unnormalized_weights)\n d_graph[current_node][self.PROBABILITIES_KEY][\n source] = unnormalized_weights / unnormalized_weights.sum()\n\n if current_node not in first_travel_done:\n unnormalized_weights = np.array(first_travel_weights)\n d_graph[current_node][self.FIRST_TRAVEL_KEY] = unnormalized_weights / unnormalized_weights.sum()\n first_travel_done.add(current_node)\n\n # Save neighbors\n d_graph[current_node][self.NEIGHBORS_KEY] = d_neighbors", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def propose_patch(self, weight_bounds, learn_rate=1.0):\n in_dims, mid_dims, _, _ = weight_bounds.shape\n\n best_index = (None, None)\n best_constraints = -1\n best_delta = 0.0\n indices = itertools.product(range(in_dims), range(mid_dims))\n for in_dim, mid_dim in tqdm(indices, total=(in_dims * mid_dims),\n desc=\"Computing Patch\"):\n bounds = weight_bounds[in_dim, mid_dim, :, :]\n # We focus on the bounds that are non-NaN\n non_nan_bounds = bounds[~np.isnan(bounds[:, 0])]\n if len(non_nan_bounds) < best_constraints:\n continue\n lower, upper, n_met = self.interval_MAX_SMT(non_nan_bounds)\n\n if n_met <= best_constraints:\n continue\n best_constraints = n_met\n best_index = (in_dim, mid_dim)\n\n if lower <= 0.0 <= upper:\n best_delta = 0.0\n else:\n # True if the interval suggests to increase the weight.\n is_increase = lower > 0.0\n # If the interval suggests to increase the weight, suggest a\n # delta slightly above lower. Otherwise, suggest one slightly\n # below upper. 
Either way, we're trying to stay as close to 0\n # as possible.\n ratio = 0.1 if is_increase else 0.9\n best_delta = lower + (ratio * (upper - lower))\n if not np.isfinite(best_delta):\n eps = 0.1\n if is_increase: # => upper == np.Infinity\n assert np.isfinite(lower + eps)\n best_delta = lower + eps\n elif upper < 0.0: # => lower == -np.Infinity\n assert np.isfinite(upper - eps)\n best_delta = upper - eps\n else:\n assert False\n assert np.isfinite(best_delta)\n print(\"Would be satisfying\", best_constraints, \"constraints.\")\n print(\"Updating weight\", best_index)\n best_delta *= learn_rate\n return best_index, best_delta, best_constraints", "def policy_gamble (self):\n\t\tidx = self.idx \t\t\t\t# internal time index of state\n\t\tprobs = self.probs\t\t\t# prob of reward for an action\n\t\tbeta = self.beta\t\t\t# inverse temp \n\n\t\t# softmax\n\t\tAct = beta*self.Q[idx]\n\t\tp = 1./(1. + np.exp(-Act))\t# probability of gamble\n\t\tself.SM[idx] = p\n\n\t\t# decide whether to take gamble based on p\n\t\trnd = np.random.random_sample()\n\t\tif rnd < p:\n\t\t\tC = 1\t# gamble\n\t\telse:\n\t\t\tC = 0\t# no gamble\n\t\tself.C[idx] = C\n\n\t\t# no gamble\n\t\tif C == 0:\t\n\t\t\treward = 0\t\t # gamble reward encoded relative to reward\n\t\t\tself.R[idx] = -1 # rewarded sure thing, coded as -1\n\t\t\tself.PE[idx] = 0 # no PE, get the thing you expected\n\t\t# gamble\n\t\telse:\n\t\t\t# decide whether a reward is delivered\n\t\t\treward = np.random.binomial(size=1, n=1, p=probs)[0]\n\t\t\tself.R[idx] = reward # indicator that reward was received\n\t\t\tif reward == 0:\n\t\t\t\treward = self.l_mag\n\t\t\telse:\n\t\t\t\treward = self.r_mag\n\t\t\tself.PE[idx] = reward - self.Q[idx]", "def fit(self, X, y):\r\n newWeight = [0.0] * self.size\r\n w = [0.0] * len(X)\r\n val = self.predict_prob(X) \r\n grad = [(y-1.0) * i[1] for i in X] \r\n grad1 = float((math.exp(-math.fsum((self.weight[f]*v for f, v in X)))) * val)\r\n grad2 = [i[1] * -1 * grad1 for i in X] \r\n for i in range(len(w)):\r\n w[i] = (grad[i] - grad2[i])\r\n \r\n w = [i*self.eta for i in w]\r\n for i in range(len(X)):\r\n newWeight[i] = self.weight[X[i][0]] -w[i]\r\n \r\n self.weight = newWeight[:]\r\n \r\n pass", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n mean = 0\n std_dev = 0.0001\n #print(in_features)\n #print(out_features)\n # create weight matrices\n weight = np.random.normal(mean, std_dev, (out_features, in_features))\n #print(weight.shape)\n grad_weight = np.zeros((in_features, out_features))\n\n # create biases (in batches)\n bias = np.zeros(out_features)\n grad_bias = np.zeros(out_features)\n\n self.params = {'weight': weight, 'bias': bias}\n self.grads = {'weight': bias, 'bias': grad_bias}\n\n ########################\n # END OF YOUR CODE #\n #######################", "def train_gp_model(train_data, kernel='rbf', warp=None, ard=False, \n params_file=None, initial_y=0, likelihood='gaussian'):\n train_feats = train_data[:, :-1]\n train_labels = train_data[:, -1:]\n\n # We first build the kernel\n if kernel == 'rbf':\n k = GPflow.kernels.RBF(17, ARD=ard)\n elif kernel == 'mat32':\n k = GPflow.kernels.Matern32(17, ARD=ard)\n elif kernel == 'mat52':\n k = GPflow.kernels.Matern52(17, ARD=ard)\n\n # The likelihood\n if likelihood == 'gaussian':\n ll = GPflow.likelihoods.Gaussian()\n elif likelihood == 'student':\n ll = GPflow.likelihoods.StudentT(deg_free=4.0)\n #ll.scale = 100.0\n\n # Now we build the warping function\n if warp == 'tanh1':\n 
w = GPflow.warping_functions.TanhFunction(n_terms=1)\n w.c = -1.0\n elif warp == 'tanh2':\n w = GPflow.warping_functions.TanhFunction(n_terms=2)\n w.c = [-1.0, -2.0]\n elif warp == 'tanh3':\n w = GPflow.warping_functions.TanhFunction(n_terms=3)\n w.c = [-1.0, -2.0, -3.0]\n elif warp == 'log':\n w = GPflow.warping_functions.LogFunction()\n\n if warp is not None and 'tanh' in warp:\n #w.a.transform = GPflow.transforms.Exp()\n #w.b.transform = GPflow.transforms.Exp()\n #w.d.transform = GPflow.transforms.Exp()\n w.d.fixed = True\n\n\n # Finally we instantiate the model\n if warp is None:\n gp = GPflow.vgp.VGP(train_feats, train_labels, k, ll)\n else:\n gp = GPflow.warped_gp.WarpedGP(train_feats, train_labels, k, warp=w, likelihood=ll)\n gp.median = True\n\n # TODO: initialize models with previous runs\n\n # Now we optimize\n #gp.optimize_restarts(num_restarts=10, max_iters=200, robust=True)\n\n gp.optimize(max_iters=100)\n # Sometimes we get warp hypers == 0 and this result in errors\n # in the prediction. We clip the values to a small value here\n if warp is not None and warp != 'log':\n clip_hypers(gp)\n return gp", "def boost_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n Lib_c.set_target(possible_target_location)\n probabilities[possible_target_location] = integrate.quad(\n Lib_c.function,\n -np.inf, np.inf,\n epsabs=0,\n limit=50,\n full_output=1\n )[0]\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def init_weights(self, dims):\n self.W = np.random.normal(size=dims) * 0.0001", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()", "def _initialize_weights(self):\n pass", "def mixed_prob( means,stds,weights,validt):", "def initialize_weights_and_bias(self, X_train):\n n_samples, n_features = np.shape(X_train)\n n_output = 1 \n \n # This is the numeber of gridcells and we want to make one prediction pr cell. \n # It this doesn't work calculate the number of griddcells.\n\n self.b_h = [] #np.ones((self.n_hidden_layers, self.n_hidden[0]))\n self.W_h = []\n\n for i in range(len(self.n_hidden)):\n if (i == 0):\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden[0])))\n self.b_h.append(np.ones(self.n_hidden[0]))\n else:\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[i-1], self.n_hidden[i])))\n self.b_h.append(np.ones(self.n_hidden[i])) \n \n self.b_out = [1]\n self.W_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[-1], n_output))", "def update_model(self) -> torch.Tensor:\n # PER needs beta to calculate weights\n samples = self.memory.sample_batch(self.beta)\n weights = torch.FloatTensor(\n samples[\"weights\"].reshape(-1, 1)\n ).to(self.device)\n indices = samples[\"indices\"]\n \n # 1-step Learning loss\n elementwise_loss = self._compute_dqn_loss(samples, self.gamma)\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n \n # N-step Learning loss\n # we are gonna combine 1-step loss and n-step loss so as to\n # prevent high-variance. 
The original rainbow employs n-step loss only.\n if self.use_n_step:\n gamma = self.gamma ** self.n_step\n samples = self.memory_n.sample_batch_from_idxs(indices)\n elementwise_loss_n_loss = self._compute_dqn_loss(samples, gamma)\n elementwise_loss += elementwise_loss_n_loss\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n\n self.optimizer.zero_grad()\n loss.backward()\n clip_grad_norm_(self.dqn.parameters(), 10.0)\n self.optimizer.step()\n \n # PER: update priorities\n loss_for_prior = elementwise_loss.detach().cpu().numpy()\n new_priorities = loss_for_prior + self.prior_eps\n self.memory.update_priorities(indices, new_priorities)\n \n # NoisyNet: reset noise\n self.dqn.reset_noise()\n self.dqn_target.reset_noise()\n\n return loss.item()", "def get_weights(self):", "def gp_fit(weights, params, task):\n print('GP training ...')\n # Load and rescale parameters\n # file_name = '../Data/lhc_512_5.txt'\n # params = np.loadtxt(file_name)\n params, tmean, tmult = rescale(params)\n\n # Set the kernel\n # kernel = GPy.kern.Matern52(input_dim=params.shape[1], variance=.01, lengthscale=.1)\n kernel = GPy.kern.Matern52(input_dim=params.shape[1])\n\n # GP Regression\n model = GPy.models.GPRegression(params, weights, kernel=kernel)\n model.optimize()\n\n # Save model\n nparams = params.shape[1]\n ntrain = weights.shape[1]\n model.save_model('../Data/GPmodel/'+task+'gpfit_'+str(ntrain)+'_'+str(nparams), compress=True, save_data=True)\n return model, tmean, tmult", "def soft_update(self, local_model, target_model):\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n assert len(local_weights) == len(target_weights), \"Local and target model parameters must have the same size.\"\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n target_model.set_weights(new_weights)", "def test_get_prob_needs_trimming(self):\n # for a family 1 circuit:\n level_type_array = [g.LEVEL_TYPES.RANDOM]\n W = 2\n G = 2\n fg = .5\n X = None\n fx = None\n desired_prob = 1.0 - ((1.0 - ((1 - fg) ** G)) ** 2)\n self.assertEqual(g.get_prob_needs_trimming(W, G, fg, X, fx,\n level_type_array),\n desired_prob)", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def initializeWeights(mlModel):\n\n randInitRange = mlModel.randInitRange\n\n numFeatures = mlModel.features.shape[1]\n mlModel.weights = np.random.rand(numFeatures+1) * 2 * randInitRange - randInitRange\n\n return mlModel", "def update_weights(self, example):\n pred = self.predict(example)\n if pred != example.label:\n self.weights[example.label] = self.weights[example.label] + example.fvector\n self.weights[pred] = self.weights[pred] - example.fvector", "def soft_update(self, local_model, target_model, tau):\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = tau * local_weights + (1 - tau) * target_weights\n target_model.set_weights(new_weights)", 
"def gaussian_weights(self, pad, feather):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt = np.zeros_like(dist)\n i_feather = (dist >= W/2 - pad - feather) & ( dist <= W/2 -pad )\n wt_feather = np.exp(-((xy[i_feather]-xy0)/(feather/2.))**2)\n wt[ i_feather ] = wt_feather\n wt[ dist <= W/2 - pad - feather ] = 1\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.word.bias.data.fill_(0)\n self.word.weight.data.uniform_(-0.1, 0.1)", "def weights_decay(self):\n for param_group in self.optimizer.param_groups:\n for param in param_group['params']:\n param.data = param.data.add(-1.*self.weights_decay * param_group['lr'], param.data)", "def build_guided_model():\n if \"GuidedBackProp\" not in ops._gradient_registry._registry:\n @ops.RegisterGradient(\"GuidedBackProp\")\n def _GuidedBackProp(op, grad):\n dtype = op.inputs[0].dtype\n return grad * tf.cast(grad > 0., dtype) * \\\n tf.cast(op.inputs[0] > 0., dtype)\n\n g = tf.get_default_graph()\n with g.gradient_override_map({'Relu': 'GuidedBackProp'}):\n new_model = build_model()\n return new_model", "def boosting(train_data, dim, t):\n w = []\n w.append([float(1) / float(len(train_data))] * len(train_data))\n\n # Store models in m, models are stored as a tuple with the w_vector as well\n # as the t_vector\n\n m = []\n\n for i in range(t):\n print(\"Iteration \" + str(i + 1) + str(\":\"))\n t_vec, w_vec, error = binary_classifier(train_data, dim, w[i])\n alpha = 0.5 * math.log(float(1 - error) / float(error))\n print(\"Error = \" + str(error))\n print(\"Alpha = \" + str(alpha))\n if error >= 0.5:\n break\n # Add model only if it has error rate less than 0.5\n m.append((t_vec, w_vec, alpha))\n\n is_increase_weights_printed = False\n is_decrease_weights_printed = False\n factor_to_increase = 0\n factor_to_decrease = 0\n # Update weights by figuring out which points that are misclassified\n w.append([0] * len(train_data))\n for j in range(len(train_data)):\n if np.dot(train_data[j][0:dim], w_vec) > t_vec:\n if train_data[j][dim] == -1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n else:\n if train_data[j][dim] == 1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n\n print(\"Factor to increase weights = \" + str(factor_to_increase))\n print(\"Factor to decrease weights = \" + str(factor_to_decrease))\n\n return m", "def init_weights(self):\n r = np.sqrt(6.) / np.sqrt(self.fc.in_features +\n self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)", "def init_weights(self):\n r = np.sqrt(6.) 
/ np.sqrt(self.fc.in_features +\n self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)", "def backward(model, xs, h1s, h2s, errs):\n # errs is the gradients of output layer for the minibatch\n # dW4 = (np.dot(h3s.T, errs))/xs.shape[0]\n\n # # Get gradient of hidden layer\n # dh_3 = np.dot(errs, model['W4'].T)\n # dh_3[h3s <= 0] = 0\n \n # # The bias \"neuron\" is the constant 1, we don't need to backpropagate its gradient\n # # since it has no inputs, so we just remove its column from the gradient\n # dh_3 = dh_3[:, :-1]\n \n # Gradient for weights H1 -> H2\n dW3 = (np.dot(h2s.T, errs)) / xs.shape[0]\n\n dh_2 = np.dot(errs, model['W3'].T)\n dh_2[h2s <= 0] = 0\n \n # The bias \"neuron\" is the constant 1, we don't need to backpropagate its gradient\n # since it has no inputs, so we just remove its column from the gradient\n dh_2 = dh_2[:, :-1]\n \n # Gradient for weights H1 -> H2\n dW2 = (np.dot(h1s.T, dh_2)) / xs.shape[0]\n \n # Gradient of h1\n dh_1 = np.dot(dh_2, model['W2'].T)\n dh_1[h1s <= 0] = 0\n\n # Again, drop the bias column\n dh_1 = dh_1[:, :-1]\n \n # Add the 1 to the data, to compute the gradient of W1\n ones = np.ones((xs.shape[0], 1))\n xs = np.hstack([xs, ones])\n\n dW1 = (np.dot(xs.T, dh_1))/xs.shape[0]\n\n return dict(W1=dW1, W2=dW2, W3=dW3)", "def init_weight(self):\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def init_weights_(self):\n raise NotImplementedError", "def random_weight_init(_p: Perceptron):\n\n _p.weights = [rd.choice([1-rd.random(), -1+rd.random()]) for _ in range(_p.input_size)]", "def _update_target_model(self):\n self.target_network.model.set_weights(self.policy_network.model.get_weights())", "def dropout_forward_prop(X, weights, L, keep_prob):\n rnd = np.random.rand\n cache = {}\n cache[\"A0\"] = X\n for j in range(L):\n z = np.dot(weights[\"W\" + str(j+1)], cache[\n \"A\" + str(j)]) + (weights[\"b\" + str(j+1)])\n if j == L-1:\n \"\"\" The last layer use the softmax activation function\"\"\"\n ACT = np.exp(z)/sum(np.exp(z))\n else:\n \"\"\"All layers except the last use the tanh activation function\"\"\"\n ACT = np.tanh(z)\n d3 = (rnd(ACT.shape[0], ACT.shape[1]) < keep_prob).astype(int)\n \"\"\"ACT = np.multiply(ACT, d3)\"\"\"\n ACT *= d3\n ACT /= keep_prob\n cache[\"D\"+str(j+1)] = d3\n\n cache[\"A\"+str(j+1)] = ACT\n return cache", "def bd_process_model_probability(data,\n fitness_prior=flat_fitness_prior,\n N_w_prior=flat_N_w_prior,\n mutation_object=True):\n\n if mutation_object is True:\n trajectories = data.data\n else:\n trajectories = data\n\n ind_likelihood = []\n for traj in trajectories:\n int_s = []\n for s in fitness_prior[0, :]:\n int_N_w = []\n for N_w in N_w_prior[0, :]:\n int_N_w.append(\n bd_process_conditional_likelihood_s_N(traj,\n s=s, N_w=N_w)\n )\n int_s.append(np.trapz(x=N_w_prior[0, :],\n y=int_N_w*N_w_prior[1, :]))\n\n marginalised_likelihood = np.trapz(x=fitness_prior[0, :],\n y=int_s*fitness_prior[1, :])\n ind_likelihood.append(marginalised_likelihood)\n \n mutation_prob = np.product(ind_likelihood)\n\n if mutation_object is True:\n # return updated model_comparison object \n data.bd_prob = mutation_prob\n return data\n else:\n # return 
marginalised likelihood.\n return mutation_prob", "def freeze_params(model: nn.Module):\n for par in model.parameters():\n par.requires_grad = False", "def initialize_weights_xavier(self):\n\t\tself.weights = [np.random.uniform(-1/sqrt(size1), 1/sqrt(size1)) for size1, size2 in zip(self.sizes[:-1], self.sizes[1:])]\n\t\tself.biases = [np.zeros([size, ]) for size in self.sizes[1:]]", "def freeze_model(model):\n for param in model.parameters():\n param.requires_grad = False", "def propagate(w, b, X, Y):\n\n m = X.shape[1];\n\n # A.shape should be (1, m), m -- number of training examples\n A = sigmoid(np.dot(w.T, X) + b);\n # print(\"A = \" +str(A))\n\n # single training exmaple cost = - (y * log(A) + (1-y) * log(1-A))\n # cost = (-1/m) * (np.dot(Y, np.log(A).T) + np.dot((1-Y), np.log(1-A).T));\n cost = (-1/m)*(np.dot(Y, np.log(A).T) + np.dot((1-Y), np.log(1-A).T));\n # print(\"cost in function: \" +str(cost))\n\n # Backward propagation \n dw = (1/m) * np.dot(X, (A - Y).T)\n # np.sum() : axis = 0 means along the column and axis = 1 means working along the row.\n db = (1/m) * ((A - Y).sum(1))\n\n # dw.shape should be (num_px * nump_px * 3, 1), db should be a scalar\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n\n # np.squeeze(arr) -- remove one dimentional entry from the shape of given array. \n # -- arr: input array\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw, \"db\": db}\n \n return grads, cost", "def test_input_warped_gp_identity(self):\n k = GPy.kern.RBF(1)\n m = GPy.models.GPRegression(self.X, self.Y, kernel=k)\n m.optimize()\n preds = m.predict(self.X)\n\n warp_k = GPy.kern.RBF(1)\n warp_f = GPy.util.input_warping_functions.IdentifyWarping()\n warp_m = GPy.models.InputWarpedGP(self.X, self.Y, kernel=warp_k, warping_function=warp_f)\n warp_m.optimize()\n warp_preds = warp_m.predict(self.X)\n\n np.testing.assert_almost_equal(preds, warp_preds, decimal=4)", "def weight(self):", "def InitWeights(self):\n self.w = -1 + 2 * np.random.rand(self.num_of_inputs,)\n self.w0 = -1 + 2 * np.random.rand()", "def fit(self, w):\n w_former = w\n w_next = w\n w_t = w\n w_t_100 = w\n w_diff = 10000\n i = 0\n #tim_beg = t.time()\n # use two part to calculate the a(w,w0):calculate the gradient using regular or SDG, batch = 10\n # calculate the gradient and update the w,w0\n while i < 10000 and np.abs(w_diff) > 0.00001:\n loss_func = self.lost\n grads = self.gradient(loss_func)\n # calculate the y_pred(eta)\n w_next = w_former - grads(w_former) / (10000)\n k =self.lost(w_next) - self.lost(w_former)\n m = np.dot(w_next-w_former, grads(w_former).T)\n if i != 0 and i % 100 == 0:\n w_t = w_t_100\n w_t_100 = w_next\n w_diff = 1 / len(w) * (np.sum(np.abs(w_t_100 - w_t)))\n i_loss = self.lost(w_next)\n print(\"Iteration < %d > with loss < %f >\" % (i, i_loss))\n #self.los_plt.append(i_loss)\n #tim = t.time() - tim_beg\n #self.tim.append(tim)\n i += 1\n w_former = w_next\n #plt.plot(self.tim, self.los_plt)\n #plt.xlabel(\"time\")\n #plt.ylabel('loss')\n #plt.show()\n if i >= 10000:\n print(\"~Optimization stops because finishing iteration~\")\n if np.abs(w_diff) <= 0.00001:\n print(\"~Optimization stops because of difference between weights are less than 0.00001~\")\n self.w_result = w_next", "def update_weights(self, gradient_steps):\n for gradient_step in range(int(gradient_steps)):\n states, actions, rewards, dones, new_states = tf.numpy_function(\n self.concat_buffer_samples, [], self.batch_dtypes\n )\n self.update_critic_weights(states, actions, new_states, dones, 
rewards)\n if gradient_step % self.policy_delay == 0:\n self.update_actor_weights(states)\n self.sync_target_models()", "def init_weight(w):\n shape = w.shape\n if len(shape) == 4:\n i, o, u, v = shape\n k = np.sqrt(6 / (i * u * v + o * u * v))\n w.data.uniform_(-k, k)\n elif len(shape) == 2:\n k = np.sqrt(6 / sum(shape))\n w.data.uniform_(-k, k)\n elif len(shape) == 1:\n w.data.zero_()", "def set_flat_weights(model, flat_weights):\n k = 0\n for p in model.parameters():\n n = p.data.numel()\n p.data.copy_(flat_weights[k : (k + n)].view_as(p.data))\n k += n\n assert k == flat_weights.numel()", "def conv_net_model(x, keep_prob):\n\n # Could use tf.truncated_normal() instead of tf.random_normal(),\n # see what is the best choice\n # note tf.truncated() is used in the FCN implementation\n with tf.variable_scope('weights'):\n weights = {'W_d_conv1': utils.weight_def([3, 3, NUM_CHANNELS, 64], stddev=STD_VAR_INIT, name = 'W_d_conv1'),\n 'W_d_conv2': utils.weight_def([3, 3, 64, 64], stddev=STD_VAR_INIT, name = 'W_d_conv2'),\n 'W_d_conv3': utils.weight_def([3, 3, 64, 128], stddev=STD_VAR_INIT, name = 'W_d_conv3'),\n 'W_d_conv4': utils.weight_def([3, 3, 128, 128], stddev=STD_VAR_INIT, name = 'W_d_conv4'),\n 'W_d_conv5': utils.weight_def([3, 3, 128, 256], stddev=STD_VAR_INIT, name = 'W_d_conv5'),\n 'W_d_conv6': utils.weight_def([3, 3, 256, 256], stddev=STD_VAR_INIT, name = 'W_d_conv6'),\n\n 'W_deconv1': utils.weight_def([3, 3, 256, 256], stddev=STD_VAR_INIT, name = 'W_deconv1'),\n 'W_u_conv1': utils.weight_def([3, 3, 384, 256], stddev=STD_VAR_INIT, name = 'W_u_conv1'),\n 'W_u_conv2': utils.weight_def([3, 3, 256, 128], stddev=STD_VAR_INIT, name = 'W_u_conv2'),\n\n 'W_deconv2': utils.weight_def([3, 3, 128, 128], stddev=STD_VAR_INIT, name = 'W_deconv2'),\n 'W_u_conv3': utils.weight_def([3, 3, 192, 128], stddev=STD_VAR_INIT, name = 'W_u_conv3'),\n 'W_u_conv4': utils.weight_def([3, 3, 128, 64], stddev=STD_VAR_INIT, name = 'W_u_conv4'),\n\n 'W_convout': utils.weight_def([1, 1, 64, NUM_CLASSES], stddev=STD_VAR_INIT, name = 'W_convout')}\n\n with tf.variable_scope('biases'):\n biases = {'B_d_conv1': utils.bias_def([64], name = 'B_d_conv1'),\n 'B_d_conv2': utils.bias_def([64], name = 'B_d_conv2'),\n 'B_d_conv3': utils.bias_def([128], name = 'B_d_conv3'),\n 'B_d_conv4': utils.bias_def([128], name = 'B_d_conv4'),\n 'B_d_conv5': utils.bias_def([256], name = 'B_d_conv5'),\n 'B_d_conv6': utils.bias_def([256], name = 'B_d_conv6'),\n 'B_deconv1': utils.bias_def([256], name = 'B_deconv1'),\n 'B_u_conv1': utils.bias_def([256], name = 'B_u_conv1'),\n 'B_u_conv2': utils.bias_def([128], name = 'B_u_conv2'),\n 'B_deconv2': utils.bias_def([128], name = 'B_deconv2'),\n 'B_u_conv3': utils.bias_def([128], name = 'B_u_conv3'),\n 'B_u_conv4': utils.bias_def([64], name = 'B_u_conv4'),\n 'B_convout': utils.bias_def([NUM_CLASSES], name = 'B_convout')}\n\n # IMPORTANT STEP\n # From FCN implementation:\n # shape = tf.shape(data)\n # deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\n #data = tf.reshape(data, shape=[BATCH_SIZE, IMG_WIDTH, IMG_HEIGHT, NUM_CHANNELS])\n\n # Going down\n # First block\n d_conv_dropout_relu1 = utils.conv2d_dropout_relu(x, weights['W_d_conv1'], biases['B_d_conv1'], keep_prob = keep_prob,\\\n name = 'd_conv_1')\n\n d_conv_dropout_relu2 = utils.conv2d_dropout_relu(d_conv_dropout_relu1, weights['W_d_conv2'], biases['B_d_conv2'], keep_prob = keep_prob,\\\n name = 'd_conv_2')\n\n max_pool1 = utils.maxpool2d(d_conv_dropout_relu2, name = 'max_pool')\n\n # Second block\n d_conv_dropout_relu3 = utils.conv2d_dropout_relu(max_pool1, weights['W_d_conv3'], biases['B_d_conv3'], keep_prob = keep_prob,\\\n name = 'd_conv_3')\n\n d_conv_dropout_relu4 = utils.conv2d_dropout_relu(d_conv_dropout_relu3, weights['W_d_conv4'], biases['B_d_conv4'], keep_prob = keep_prob,\\\n name = 'd_conv_4')\n\n max_pool2 = utils.maxpool2d(d_conv_dropout_relu4, name = 'max_pool2')\n\n\n # Transition\n d_conv_dropout_relu5 = utils.conv2d_dropout_relu(max_pool2, weights['W_d_conv5'], biases['B_d_conv5'], keep_prob = keep_prob,\\\n name='d_conv_5')\n\n d_conv_dropout_relu6 = utils.conv2d_dropout_relu(d_conv_dropout_relu5, weights['W_d_conv6'], biases['B_d_conv6'], keep_prob = keep_prob,\\\n name ='d_conv_6')\n\n\n # Going up\n # First block\n deconv_relu1 = utils.deconv2d_relu(d_conv_dropout_relu6, weights['W_deconv1'], biases['B_deconv1'], name = 'deconv1')\n\n concat1 = tf.concat([deconv_relu1, d_conv_dropout_relu4], 3, name = 'concat1')\n\n u_conv_dropout_relu1 = utils.conv2d_dropout_relu(concat1, weights['W_u_conv1'], biases['B_u_conv1'], keep_prob = keep_prob,\\\n name ='u_conv_1')\n\n u_conv_dropout_relu2 = utils.conv2d_dropout_relu(u_conv_dropout_relu1, weights['W_u_conv2'], biases['B_u_conv2'], keep_prob = keep_prob,\\\n name ='u_conv_2')\n\n # Second block\n deconv_relu2 = utils.deconv2d_relu(u_conv_dropout_relu2, weights['W_deconv2'], biases['B_deconv2'], name = 'deconv2')\n\n concat2 = tf.concat([deconv_relu2, d_conv_dropout_relu2], 3)\n\n u_conv_dropout_relu3 = utils.conv2d_dropout_relu(concat2, weights['W_u_conv3'], biases['B_u_conv3'], keep_prob = keep_prob,\\\n name ='u_conv_3')\n\n u_conv_dropout_relu4 = utils.conv2d_dropout_relu(u_conv_dropout_relu3, weights['W_u_conv4'], biases['B_u_conv4'], keep_prob = keep_prob,\\\n name ='u_conv_4')\n\n # Convout\n convout = utils.conv2d_dropout_relu(u_conv_dropout_relu4, weights['W_convout'], biases['B_convout'], keep_prob = tf.constant(1.0),\\\n name = 'convout')\n\n # Storing variables into a variables list\n # weights:\n variables = []\n for _, item in weights.items():\n variables.append(item)\n\n for _, item in biases.items():\n variables.append(item)\n\n\n return convout, variables", "def back_propagate(self, reward, maxQ):\n\n error = self.alpha * (reward + self.gamma*maxQ - self.value)\n #logging.debug(\"error is now %s\" % (error))\n\n # sigmoid derivate is sigmoid(x) * (1 - sigmoid(x) )\n dsig = self.value * (1 - self.value)\n\n gradient = error * dsig\n #logging.debug(\"gradient is now: %s\" % (gradient))\n\n self.weigths = np.add( self.weights, np.multiply(gradient, self.weights) )\n # self.weights = [gradient * w + w for w in self.weights]", "def make_weights_for_balanced_classes(self):\n\n count = [0] * self.get_num_classes()\n\n # label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n # labels = [self.class_map_dict[l] for l in self.get_labels()]\n\n labels = self.get_labels()\n\n # Count how many instances there are for each class\n for l in labels:\n count[l] += 1\n\n weight_per_class = [0.] * self.get_num_classes()\n\n N = float(sum(count))\n\n # Assign a weight which is inversely proportional to class frequency\n for i in range(self.get_num_classes()):\n weight_per_class[i] = N/float(count[i])\n\n # Save results for debugging purposes\n self._weight_per_class = weight_per_class\n\n # Now assign a weight to each data point\n weight = [0] * len(labels)\n\n for idx, val in enumerate(labels):\n weight[idx] = weight_per_class[val]\n\n return weight", "def updateWeights(self, message):\n prefWeights = [self.prefWghts1.GetValue(), \n self.prefWghts2.GetValue(), \n self.prefWghts3.GetValue()]\n\n self.model.setWeights(prefWeights, \n self.Prefs.GetValue(), \n self.ExcessCap.GetValue(), \n self.CongPenalty.GetValue(), \n self.DeptFairness.GetValue(), \n self.Back2Back.GetValue())", "def initWeights(self):\n self.weights = []\n self.bias = []\n for i, dim in enumerate(self.dimensions[1:]):\n self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))\n self.bias.append(np.random.uniform(-1,1,dim))", "def prior(kernel_size, bias_size): #removed dtype=None, unused argument\n number = kernel_size + bias_size\n prior_model = keras.Sequential(\n [\n tfp.layers.DistributionLambda(\n lambda t: tfp.distributions.MultivariateNormalDiag(\n loc=tf.zeros(number), scale_diag=tf.ones(number)\n )\n )\n ]\n )\n return prior_model", "def dropout_forward_prop(X, weights, L, keep_prob):\n cache = {\"A0\": X}\n for el in range(1, L + 1):\n Z = np.matmul(\n weights[\"W\" + str(el)],\n cache[\"A\" + str(el - 1)]\n ) + weights[\"b\" + str(el)]\n if el == L:\n t = np.exp(Z)\n A = t / np.sum(t, axis=0, keepdims=True)\n cache[\"A\" + str(el)] = A\n else:\n A = np.tanh(Z)\n D = np.random.rand(A.shape[0], A.shape[1])\n D = np.where(D < keep_prob, 1, 0)\n A = np.multiply(A, D)\n cache[\"D\" + str(el)] = D\n cache[\"A\" + str(el)] = A / keep_prob\n return cache", "def setup_train(self, input_data, target):\n \n W_my = self.setup_print(self.W, \"intial W\")\n \n # The weights with the random adjustment are <batch_size, from, to>, so\n # we inflate W here, too.\n W_exp = tf.tile(tf.expand_dims(W_my, 0), [self.config['batch_size'], 1, 1]) # <batch_size, from, to>\n\n # 1. Actual output\n output = self.setup_forward(W_exp, input_data, prefix=\"org\") # <batch_size, (timesteps,) output>\n loss = self.setup_loss(output, target, prefix=\"org\") # <batch_size>\n loss = self.setup_print(loss, \"loss\")\n \n # 2. Test output in the environment\n # TODO Do the random test around the decayed weights\n # NOTE: W_adj_source keeps its value inside a single run\n # https://stackoverflow.com/questions/52213325/are-tensorflow-random-values-guaranteed-to-be-the-same-inside-a-single-run\n W_adj = self.W_adj_source # <batch_size, from, to>\n W_adj = self.setup_print(W_adj, \"W_adj\")\n \n output_adj = self.setup_forward(W_exp + W_adj, input_data, prefix=\"adj\")\n loss_adj = self.setup_loss(output_adj, target, prefix=\"adj\")\n loss_adj = self.setup_print(loss_adj, \"loss_adj\")\n # improvement is positive when we go from large error to small error\n improvement = loss - loss_adj # <batch_size>\n improvement = self.setup_print(improvement, \"improvement\")\n \n # Update the weights\n improvement = tf.expand_dims(tf.expand_dims(improvement, 1), 2) # <batch_size, 1, 1>\n weight_update = W_adj * improvement # <batch_size, from, to>\n weight_update = self.setup_print(weight_update, \"weight_update\")\n weight_update = tf.reduce_mean(weight_update, axis=0) # <from, to>\n \n weight_update = self.setup_print(weight_update, \"weight_update_reduced\")\n weight_update = self.W.assign_add(weight_update)\n \n # Get the average loss\n loss_avg = tf.reduce_mean(loss, axis=0)\n \n return weight_update, loss_avg", "def transfer_weights(src_model, dest_model):\r\n # ingore the first layer Input()\r\n # layer 1-24 to 1-24\r\n for i in range(1, 24):\r\n dest_model.layers[i].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 1-24 successfully!\")\r\n\r\n # layer 25-45 to 65-85\r\n for i in range(25, 45):\r\n dest_model.layers[i+40].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 25-45 successfully!\")\r\n\r\n # layer 46-65 to 126-145\r\n for i in range(46, 65):\r\n dest_model.layers[i+80].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 46-65 successfully!\")\r\n\r\n # 69 to 189\r\n dest_model.layers[69+120].set_weights(src_model.layers[69].get_weights())\r\n print(\"Partially load weights from layer 69 successfully!\")", "def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights", "def initializeWeightsToZero(self):\n\t\t## YOUR CODE BELOW\n\t\t\n\t\tutil.raiseNotDefined()\n\t\treturn", "def gpbandits(model, data, iters=10, kernel='se', cl=0.1, v=0.0, num_samples=500, verbose=True, best_model_log=False):\n\n num_dims = model.num_dims # number of hyperparameter dimensions\n\n # initial model evaluation\n points = model.encode()[np.newaxis,:]\n scores = np.array([model.train_test_cv(data)])\n\n # best model and corresponding value at each iteration\n if best_model_log:\n best_point_tmp = []\n best_point_tmp.append(points[0,:])\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(0, scores[0]))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(0,points[0,:] scores[0]))\n\n # loop\n for i in range(iters):\n\n # sample num_Samples random points from [0,1)^num_dims\n candidates = sample(num_dims, num_samples)\n\n # find GP posterior\n A = formK(candidates, candidates, kernel, cl)\n B = formK(points, points, kernel, cl) + v*np.eye(points.shape[0])\n C = formK(candidates, points, kernel, cl)\n tmp = C.dot(np.linalg.inv(B))\n mu = tmp.dot(scores)\n Sigma = A - tmp.dot(C.T)\n var = np.diagonal(Sigma) + np.finfo(float).eps\n sig = np.sqrt(var)\n\n # choose new point with best expected improvement\n exp_imp = expected_improvement(scores.min(), mu, sig)\n best_idx = np.argmax(exp_imp)\n best_point = candidates[best_idx]\n\n # set hyperparameters with best sampled point\n model.decode(best_point)\n\n # return re-encoded point\n new_point = model.encode()\n\n # evaluate model\n new_score = model.train_test_cv(data)\n\n # append to points/scores lists\n points = np.vstack((points, best_point)) # use best_point, not re-encoded new_point to break discrete symmetries\n scores = np.append(scores, new_score)\n\n # save progress\n save_checkpoint(points, scores)\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(i+1, new_score))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(i+1, best_point, new_score))\n\n if best_model_log:\n ind = np.argmin(scores)\n best_point_tmp.append(points[ind])\n\n\n\n # return best model\n ind = np.argmin(scores)\n best_overall_point = points[ind]\n model.decode(best_overall_point)\n\n if not best_model_log:\n return model\n else:\n return model, best_point_tmp", "def update_weights(weights, alpha, y_true, y_pred):\n def change_labels(arr):\n for i,a in enumerate(arr):\n if a == 0:\n arr[i] = -1\n return arr \n \n y_true, y_pred = change_labels(y_true), change_labels(y_pred)\n w_hat = weights * np.exp(-alpha * y_true * y_pred)\n return w_hat / sum(w_hat)", "def loopy_belief_propagation(tests, groups,\n base_infection_rate,\n sensitivity, specificity,\n min_iterations, max_iterations,\n atol):\n n_groups, n_patients = groups.shape\n if np.size(groups) == 0:\n if np.size(base_infection_rate) == 1: # only one rate\n marginal = base_infection_rate * np.ones(n_patients)\n return marginal, 0\n elif np.size(base_infection_rate) == n_patients:\n return base_infection_rate, 0\n else:\n raise ValueError(\"Improper size for vector of base infection rates\")\n\n mu = -jax.scipy.special.logit(base_infection_rate)\n\n groups_size = np.sum(groups, axis=1)\n sensitivity = utils.select_from_sizes(sensitivity, groups_size)\n specificity = utils.select_from_sizes(specificity, groups_size)\n gamma0 = np.log(sensitivity + specificity - 1) - np.log(1 - sensitivity)\n gamma1 = np.log(sensitivity + specificity - 1) - np.log(sensitivity)\n gamma = tests * gamma1 + (1 - tests) * gamma0\n test_sign = 1 - 2 * tests[:, np.newaxis]\n\n # Initialization\n alphabeta = np.zeros((2, n_groups, n_patients))\n alpha_beta_iteration = [alphabeta, 0]\n\n # return marginal from alphabeta\n def marginal_from_alphabeta(alphabeta):\n beta_bar = np.sum(alphabeta[1, :, :], axis=0)\n return jax.scipy.special.expit(-beta_bar - mu)\n\n # lbp loop\n def lbp_loop(_, alphabeta):\n alpha = alphabeta[0, :, :]\n beta = alphabeta[1, :, :]\n\n # update alpha\n beta_bar = np.sum(beta, axis=0)\n alpha = jax.nn.log_sigmoid(beta_bar - beta + mu)\n alpha *= groups\n\n # update beta\n alpha_bar = np.sum(alpha, axis=1, keepdims=True)\n beta = np.log1p(test_sign *\n np.exp(-alpha + alpha_bar + gamma[:, np.newaxis]))\n beta *= groups\n return np.stack((alpha, beta), axis=0)\n\n def cond_fun(alpha_beta_iteration):\n alphabeta, iteration = alpha_beta_iteration\n marginal = marginal_from_alphabeta(alphabeta)\n marginal_plus_one_iteration = marginal_from_alphabeta(\n lbp_loop(0, alphabeta))\n converged = np.allclose(marginal, marginal_plus_one_iteration, atol=atol)\n return (not converged) and (iteration < max_iterations)\n\n def body_fun(alpha_beta_iteration):\n alphabeta, iteration = alpha_beta_iteration\n alphabeta = jax.lax.fori_loop(0, min_iterations, lbp_loop, alphabeta)\n iteration += min_iterations\n return [alphabeta, iteration]\n\n # Run LBP while loop\n while cond_fun(alpha_beta_iteration):\n alpha_beta_iteration = body_fun(alpha_beta_iteration)\n\n alphabeta, _ = alpha_beta_iteration\n\n # Compute two consecutive marginals\n marginal = marginal_from_alphabeta(alphabeta)\n marginal_plus_one_iteration = marginal_from_alphabeta(lbp_loop(0, alphabeta))\n\n return marginal, np.amax(np.abs(marginal - marginal_plus_one_iteration))", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def init_parameters(self):\n stdv = 1. / math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def soft_update_critic(self):\n local_weights = np.array(self.critic_local.model.get_weights())\n target_weights = np.array(self.critic_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.critic_target.model.set_weights(new_weights)", "def _generate_prior(self, text_lengths, feats_lengths,\n w=1) -> paddle.Tensor:\n B = len(text_lengths)\n T_text = text_lengths.max()\n T_feats = feats_lengths.max()\n\n bb_prior = paddle.full((B, T_feats, T_text), fill_value=-np.inf)\n for bidx in range(B):\n T = feats_lengths[bidx].item()\n N = text_lengths[bidx].item()\n\n key = str(T) + ',' + str(N)\n if self.cache_prior and key in self._cache:\n prob = self._cache[key]\n else:\n alpha = w * np.arange(1, T + 1, dtype=float) # (T,)\n beta = w * np.array([T - t + 1 for t in alpha])\n k = np.arange(N)\n batched_k = k[..., None] # (N,1)\n prob = betabinom.pmf(batched_k, N, alpha, beta) # (N,T)\n\n # store cache\n if self.cache_prior and key not in self._cache:\n self._cache[key] = prob\n\n prob = paddle.to_tensor(\n prob, place=text_lengths.place, dtype=\"float32\").transpose(\n (1, 0)) # -> (T,N)\n bb_prior[bidx, :T, :N] = prob\n\n return bb_prior", "def up_next_weight(data, weight_o):\n \n del0 = 4e-3 # minimum coordinate\n delta = 5e-3 # spacing of grid\n M = 10 # number of samples\n S = 5 # number of steps in the random walk\n\n N = data.shape[1]\n\n # Computing Universal Portfolio.\n r = np.ones(N) / N # Start each one at the uniform point\n b = np.ones(r.shape[0])\n \n allM = np.zeros((N, M)) # Take the average of m samples\n for m in range(M):\n b = r.copy()\n for i in range(S):\n bnew = b.copy()\n j = np.random.randint(N - 1)\n a = np.random.choice([-1, 1])\n bnew[j] = b[j] + (a * delta)\n bnew[N - 1] = b[N - 1] - (a * delta)\n if bnew[j] >= del0 and bnew[N - 1] >= del0:\n muliplier_x = min(1, np.exp((b[N - 1] - (2 * del0)) / (N * delta)))\n x = np.prod(data @ b) * muliplier_x\n muliplier_y = min(1, np.exp((bnew[N - 1] - (2 * del0)) / (N * delta)))\n y = np.prod(data @ bnew) * muliplier_y\n pr = min(y / x, 1) # or pr = min(x / y, 1)\n if np.random.rand() < pr:\n b = bnew.copy()\n allM[:, m] = b\n\n weight = np.mean(allM, 1) # Taking the average of m samples.\n return weight / sum(weight)", "def init_weight(self):\n init_bn(self.norm0)", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])" ]
[ "0.59936357", "0.5899231", "0.5710139", "0.5680814", "0.5665641", "0.565304", "0.56094337", "0.56018", "0.55877113", "0.55586326", "0.5552374", "0.5551015", "0.5542684", "0.5506722", "0.5504694", "0.5479312", "0.54784465", "0.5458892", "0.54003364", "0.5388704", "0.5387438", "0.53765374", "0.5354086", "0.534679", "0.53443646", "0.5343532", "0.53322417", "0.53322417", "0.53322417", "0.5319208", "0.5315029", "0.5310786", "0.5307681", "0.53041637", "0.5297385", "0.5294852", "0.52902824", "0.52811116", "0.52726156", "0.5272008", "0.5257635", "0.52500516", "0.5244461", "0.5235521", "0.5232402", "0.523229", "0.52304447", "0.5228851", "0.5221197", "0.5221125", "0.5218613", "0.521838", "0.5213478", "0.5212532", "0.5207952", "0.5207952", "0.5205744", "0.5193184", "0.5187679", "0.5185255", "0.5173973", "0.5170109", "0.51697844", "0.51608187", "0.5156694", "0.51524675", "0.5152295", "0.5149556", "0.51449215", "0.51363754", "0.5131072", "0.5118458", "0.51176965", "0.5112049", "0.51073235", "0.51062596", "0.510597", "0.5104133", "0.5101061", "0.5090552", "0.50897104", "0.50867367", "0.5084889", "0.50796926", "0.50764406", "0.50748914", "0.5073174", "0.50699383", "0.50680864", "0.50655967", "0.5065101", "0.5060886", "0.5055795", "0.5055795", "0.5055795", "0.5053436", "0.50516313", "0.5045471", "0.5044644", "0.50443" ]
0.5979327
1
Exports the given reaction probabilities into a JSON formatted file, saved at filename
def export_json(rxn_probs, filename): with open(filename, 'w') as f: f.write(json.dumps(rxn_probs)) return filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_json(self, filename):\n with open(filename, 'a+') as f:\n f.write(json.dumps(self.weights))\n f.write(\"\\n\")", "def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)", "def dump(pred_out_path, xyz_pred_list, verts_pred_list):\n # make sure its only lists\n xyz_pred_list = [x.tolist() for x in xyz_pred_list]\n verts_pred_list = [x.tolist() for x in verts_pred_list]\n #import pdb; pdb.set_trace()\n # save to a json\n with open(pred_out_path, 'w') as fo:\n json.dump(\n [\n xyz_pred_list,\n verts_pred_list\n ], fo)\n print('Dumped %d joints and %d verts predictions to %s' % (len(xyz_pred_list), len(verts_pred_list), pred_out_path))", "def class2json(classifier, filename = \"classifier\"):\n model_json = classifier.to_json()\n with open(filename + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n # Serialize weights to HDF5\n classifier.save_weights(filename + \".h5\")\n print(\"Successfully saved the classifier to file \" + filename + \".\")", "def output(self, filename):\n with open(filename, 'w') as f:\n op = {}\n layer_res = []\n alphas_res = []\n for layer in self._layers:\n weights = []\n alphas = []\n for neuron in layer._neurons:\n weights.append(neuron._weights)\n alphas.append(neuron._alpha)\n layer_res.append(weights)\n alphas_res.append(alphas)\n op['layers'] = layer_res\n op['alphas'] = alphas_res\n json.dump(op, f, indent='\\t')", "def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))", "def writeJSON(filename):\n if not filename.endswith('.json'):\n filename += '.json'\n with open(filename, 'w') as f:\n for x in range(numRows):\n scores = quizScores()\n types = getTypes(scores)\n row = { 'id': x,\n 'challenger': types[0], 'collaborator': types[1],\n 'communicator': types[2], 'contributor': types[3],\n 'q1': scores[0], 'q2': scores[1], 'q3': scores[2],\n 'q4': scores[3], 'q5': scores[4], 'q6': scores[5],\n 'q7': scores[6], 'q8': scores[7], 'q9': scores[8],\n 'q10': scores[9], 'q11': scores[10], 'q12': scores[11],\n 'q13': scores[12], 'q14': scores[13], 'q15': scores[14],\n 'q16': scores[15], 'q17': scores[16], 'q18': scores[17]\n }\n json.dump(row, f, sort_keys=True)", "def save(self, filename):\n import json\n\n json = json.dumps(self.joint_limits)\n with open(filename, 'w') as f:\n f.write(json)", "def write_to_json(dicts, filename: str):\n\n with open(filename, 'w', encoding='utf-8') as f:\n mmcv.dump(dicts, f, file_format='json')", "def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')", "def __write_csv(self, prediction_probs, n, filename):\n d = {'Id': pd.Series([i for i in xrange(1, n + 1)]),\n 'Action': pd.Series(prediction_probs)}\n df = pd.DataFrame(d)\n df = df[['Id', 'Action']]\n df.to_csv(filename, sep=',', 
encoding='utf-8',\n index=False)", "def save_highscores(self, contents):\n\t\ttry:\n\t\t\twith open(self.filename, 'w') as f_obj:\n\t\t\t\tf_obj.write(json.dumps(contents)) #save as json\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 741 6243 for assistance.')", "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases]}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))", "def dump_distributions(self):\n file_path = self.get_local_path(self.filename_distributions)\n\n with open(file_path, \"w\") as f:\n json_obj = {\n \"feature_uniques\": self.feature_uniques,\n \"feature_summaries\": self.feature_summaries,\n }\n json.dump(json_obj, f)\n return file_path", "def save_priors(name, prior_dict):\n with open(name + \"_priors.json\", \"w\") as fp:\n json.dump(prior_dict, fp)", "def write_to_json(self, export_fp: str):\n # TODO:\n pass", "def save(self, characters, filepath):\n\n\t\twith open(filepath, 'w') as out:\n\t\t\tjson.dump(characters, out, sort_keys=True, indent=4)", "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases],\n \"cost\": str(self.cost.__name__)}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "def write_output_file(filename, actions, log):\n f = open(filename, 'w')\n\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n\n for k in log.keys():\n f.write(str(k) + ' = ' + str(log.get(k)))\n f.write('\\n')\n\n f.close()", "def to_json_file(self, path):\n with open(path, 'w') as f:\n f.write(self.to_json())", "def write_prediction_results(formatted_outputs, file_path):\n\n with codecs.open(file_path, 'w', 'utf-8') as f:\n for formatted_instance in formatted_outputs:\n json_str = json.dumps(formatted_instance, ensure_ascii=False)\n f.write(json_str)\n f.write('\\n')\n zipfile_path = file_path + '.zip'\n f = zipfile.ZipFile(zipfile_path, 'w', zipfile.ZIP_DEFLATED)\n f.write(file_path)\n\n return zipfile_path", "def write_to_json(missense_dict, frame_shift_dict, missense_name_dict, frame_shift_name_dict, person):\n json_file[person] = {\n \"missense_variant\": missense_dict,\n \"missense_HGNC_name\": missense_name_dict,\n \"frame_shift_variant\": frame_shift_dict,\n \"frame_shift_HGNC_name\": frame_shift_name_dict}", "def write_submission(ratings, file_name):\n # Build output string to write into the file\n output = \"Id,Prediction\\n\"\n for (row, col, rat) in ratings:\n # every line is of the format 'rX_cY,R' where X and Y correspond to row(user) and column(movie) indices and R is the rating\n # we have do increase row and col by one because numpy arrays use 0-base indexing while movie/user indices start at 1\n output += \"r%d_c%d,%f\\n\" % (row + 1, col + 1, rat)\n \n # Write file \n with open(os.path.join('../predictions_csv', file_name), 'w') as output_file:\n output_file.write(output)\n \n return output", "def save_modal_output_to_json(file_name: str, data_to_save: dict) -> str:\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', 
attrs=['bold'])\n try:\n data_handler_app = apps.get_app_config('data_handler')\n data_handler_path = data_handler_app.path\n modal_output_dir = Path(data_handler_path) / \"PM_Model\" / 'model_output_files'\n json_output_file = modal_output_dir / file_name\n json_file = open(json_output_file.as_posix(), 'w')\n json.dump(data_to_save, json_file, indent=3, sort_keys=True)\n json_file.close()\n\n return json_output_file.as_posix()\n except Exception as ex:\n json_output_file.unlink()\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())\n return \"\"", "def export_json(contents, filename):\n with open(filename, 'w') as f:\n json.dump(contents, f)", "def saveFile(self, filename=\"UQModelTest.json\"):\n sd = self.saveDict()\n with open(filename, \"w\") as f:\n json.dump(sd, f, indent=2)", "def to_file(self, filename):\n\n output_dict = {'random_forest': self.random_forest,\n 'apply_preprocessing': self.apply_preprocessing,\n 'apply_postprocessing': self.apply_postprocessing}\n pickle.dump(output_dict, open(filename, \"wb\"))", "def toFile(self, file_path) -> None:\n\t\tjson_repr = self.toJSON()\n\t\t\n\t\twith open(file_path, \"w\") as f:\n\t\t\tf.write(json_repr)", "def save_to_file(cls, list_objs):\n namefile = cls.__name__ + \".json\"\n rep_list = []\n if list_objs is not None and list_objs != []:\n for item in list_objs:\n repre = cls.to_dictionary(item)\n # rep_list.append(cls.to_json_string(repre))\n rep_list.append(repre)\n\n with open(namefile, \"w\", encoding=\"UTF-8\") as f:\n # json.dump(rep_list, f)\n f.write(cls.to_json_string(rep_list))", "def write_in_json(data):\n with open('genre.json', 'w') as data_file:\n json.dump(data, data_file, indent= 4)", "def save(self, filename=None):\n if filename is None:\n filename = \"morse_smale_complex.json\"\n with open(filename, \"w\") as fp:\n fp.write(self.to_json())", "def save_to_file(data):\n\ttry:\n\t\toutput_file = open(\"output.json\", \"w\")\n\t\toutput_file.write(json.dumps(data))\n\texcept:\n\t print(Fore.GREEN + \"File not found or path is incorrect\")\n\tfinally:\n\t print(Fore.GREEN + \"Success go to output.json to look at the json\")", "def SaveWeights(ss, filename):\n ss.Net.SaveWtsJSON(filename)", "def save_to_file(self, filename: str) -> None:\n channels = {\n 'channels': {\n channel: [self._command_entry_to_json(command) for command in commands]\n for (channel, commands) in self.history.items()\n }\n }\n\n # if we can't save it, exit early\n try:\n channel_json = json.dumps(channels)\n with open(filename, 'w') as f:\n f.write(channel_json)\n except:\n return None\n\n self.needs_save = False", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def 
write_to_json(results, filename):\r\n dicts = []\r\n for row in results:\r\n print(row.neo)\r\n r = {'datetime_utc': datetime_to_str(row.time),\r\n 'distance_au': row.distance, 'velocity_km_s': row.velocity,\r\n 'designation': row._designation,\r\n 'neo': {'designation': row.neo.designation,\r\n 'name': row.neo.name, 'diameter_km': row.neo.diameter,\r\n 'potentially_hazardous': row.neo.hazardous}}\r\n dicts.append(r)\r\n\r\n with open(filename, 'w') as json_file:\r\n json.dump(dicts, json_file, indent=4, sort_keys=False)", "def save():\n filename = request.args.get(\"filename\", default=\"book\")\n if '.json' not in filename:\n filename += \".json\"\n with open('data/' + filename, 'w') as f:\n f.write(json.dumps(channels))\n return json_back()\n return \"ERROR\"", "def to_file(self, fname, delimiter=\"\\t\", encoding=\"utf-8\"):\n with open(fname, \"wb\") as fh:\n for key, score in self.ranked_items():\n fh.write(self.to_record(key, score, delimiter).encode(encoding))", "def save_file(data, filename):\n with open(filename, \"w\") as outfile:\n json.dump(data, outfile)", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def save_predictions(gtfilename, loss_type, probs, preds, outfile):\n\n # 1. get file ids\n liste_fileids = []\n targets = []\n passFirstLine=True\n with open(gtfilename, 'r') as fh:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip().split(',')\n liste_fileids.append(tmp[0])\n targets.append(tmp[1])\n\n print 'liste_fileids', len(liste_fileids)\n # 2. save preds\n import csv\n with open(outfile, 'w') as csvfile:\n # fieldnames = ['itemid', 'hasbird', 'pred', 'gt']\n fieldnames = ['itemid', 'hasbird']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n if loss_type == 'categorical_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i, 1], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i, 1]})\n elif loss_type == 'binary_hinge' or loss_type == 'binary_crossentropy' or loss_type == 'weighted_binary_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i][0], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i][0]})\n\n print \"INFO: predictions (positive class probas) saved to file:\", outfile", "def write_to_json(dictData, fileout):\n\t# Prepare the output file\n\tfout = codecs.open(fileout, 'w', 'utf-8')\n\thwDict = prepare_hw_dict(dictData)\n\tjson.dump(hwDict, fout)\n\t# Give some summary to the user\n\tprint('JSON generated. 
Success!')\n\tprint('{} headwords written to JSON file.'.format(len(hwDict)))", "def save_as_json(self, file_path):\n with open(file_path, 'w') as fd:\n json.dump(self._ngrams, fd)", "def write_folds_to_json(self, filepath: str):\n with open(filepath, \"w\") as f:\n json.dump(\n {\n \"isH5\": self.is_h5_dataset,\n \"folds\": self.folds,\n },\n f,\n indent=4,\n )", "def write_result(dict, out_path):\n with open(out_path, 'w') as f:\n json.dump(dict, f)", "def write_json(dictionary, filename):\r\n with open(filename, 'w') as data_file:\r\n json.dump(dictionary, data_file, indent=4, sort_keys=True)\r\n print('--> Wrote ' + os.path.basename(filename))", "def save(self, filename):\n self.graph.save(filename)\n with open(filename + \".json\", \"w\") as f:\n f.write(json.dumps(self.params))", "def to_file(self, file_name: str) -> None:\n\n with open(file_name, 'w') as fi:\n json.dump(self.to_dict(), fi, indent=1)", "def _json_write(filename, res):\n with open(filename, 'w+') as file:\n return json.dump(res, file)", "def write_json_file(self, fname, content):\n pass", "def write_json_file(self, fname, content):\n pass", "def write(chr_dict,filename):\n chr_name_list = sorted(chr_dict.keys())\n file = open(filename,'w+')\n for chr_name in chr_name_list:\n chr_list = chr_dict[chr_name]\n for i in range(len(chr_list)):\n file.write('%s\\t%s\\t%d\\n' % (chr_name,'\\t'.join(map(str,\\\n chr_list[i])),chr_list[i][1]-chr_list[i][0]))\n file.close()", "def save_json(file_name, file_content):\n with open(generate_file_path(\"output\", file_name), 'w', encoding='utf-8') as f:\n json.dump(file_content, f, ensure_ascii=False, indent=4)", "def fullJSON(self, filename=None, downsample=None):\n d = self.robotGridDict(downsample)\n if filename is not None:\n with open(filename, \"w\") as f:\n json.dump(d, f, separators=(',', ':'))\n else:\n return json.dumps(d)", "def to_json(d, filename):\n with open(filename, 'wb') as outfile:\n json.dump(d, outfile, indent=4)", "def save(self, filename):\n content = self.to_dict()\n with open(filename, 'w') as f:\n json.dump(content, f)", "def serialize_process(markov_process, file_path=None):\n obj = [matrix.to_serializable_dict() for matrix in markov_process]\n if file_path is None:\n return json.dumps(obj)\n with open(file_path, 'w') as f:\n json.dump(obj, f)", "def write_tojson(data, filename) -> None:\r\n with open(\"static/json/\" + filename, \"w\") as out:\r\n out.write(\r\n json.dumps(\r\n [data[datum].__dict__() for datum in data]\r\n )\r\n )", "def SaveJSON(self, filename):\n data = {\n 'files': self._files,\n 'ebuilds': self._ebuilds,\n }\n json.dump(data, open(filename, 'w'))", "def Save(self, filename: str):\n data_object = {\n \"input_layer_count\" : self.input_layer_size,\n \"hidden_layer_count\" : self.hidden_layer_size,\n \"output_layer_count\" : self.output_layer_size,\n\n \"hidden_layer_biases\" : self.hidden_layer_biases.tolist(),\n \"output_layer_biases\" : self.output_layer_biases.tolist(),\n\n \"input_to_hidden_weights\" : self.input_to_hidden_weights.tolist(),\n \"hidden_to_output_weights\" : self.hidden_to_output_weights.tolist()\n }\n\n with open(filename, \"w\") as f:\n json.dump(data_object, f)", "def write_krun_results_file(results, filename):\n\n with bz2.BZ2File(filename, 'wb') as file_:\n file_.write(json.dumps(results, indent=4))", "def to_json_file(test_health_list: List[TestHealthInfo],\n output_path: pathlib.Path) -> None:\n test_health_dicts = _to_test_health_dicts(test_health_list)\n\n with open(output_path, 'w') as json_file:\n for 
test_health in test_health_dicts:\n json.dump(test_health, json_file, allow_nan=False)\n json_file.write('\\n')", "def write_json(self, filename):\n data = {\n \"fleets\": json.loads(self.manager_df.to_json(orient=\"records\")),\n \"transports\": json.loads(self.transport_df.to_json(orient=\"records\")),\n \"customers\": json.loads(self.customer_df.to_json(orient=\"records\")),\n \"stations\": json.loads(self.station_df.to_json(orient=\"records\")),\n \"simulation\": json.loads(self.df_avg.to_json(orient=\"records\"))\n }\n\n with open(filename, 'w') as f:\n f.seek(0)\n json.dump(data, f, indent=4)", "def save_to_file(cls, list_objs):\n filename = cls.__name__\n dictt = []\n if list_objs:\n for i in list_objs:\n dictt.append(cls.to_dictionary(i))\n\n with open(filename + \".json\", \"w\") as myfile:\n myfile.write(cls.to_json_string(dictt))", "def save_dict_to_json(dictionary, filename):\n with open(filename, 'w+') as f:\n json.dump(dictionary, f)", "def save_log(self): \n result = {}\n for i, agent in enumerate(self.agents):\n result[\"agent\"+str(i)+\"ave_reward_list\"] = agent.average_reward_list\n\n f = open(\"agent_ave_reward.json\", \"w\")\n json.dump(result, f)", "def save_to_json(filename, data):\n logging.debug('Saving results to %s', filename)\n try:\n with open(filename, 'w') as file:\n json.dump(data, file)\n except IOError:\n logging.error('File %s cannot be opened for write', filename)\n exit(0)\n logging.debug('Successfully saved results to %s', filename)\n return data", "def from_json_file(path):\n with open(path, 'r') as f:\n return ReactionProbabilities.from_json(f.read())", "def dict_2_json(obj, filename):\n\twith open('data/output/' + filename, 'w') as fp:\n\t\tjson.dump(obj, fp, indent=4)", "def to_file(self, fp):\n dict_ = self.serialize()\n with open_file(fp, mode='w') as writer:\n json.dump(dict_, writer, indent=2)", "def save_dict_as_json(dict, output_path):\n with open(output_path, \"w\") as f:\n json.dump(dict, f)", "def save_to_file(self, filename: str):\n prepare = asdict(self)\n for sequencer in prepare['Sequencers']:\n for step in sequencer['Sequence']:\n if 'Name' in step.keys() and step['Name'] == '':\n step.pop('Name')\n if 'StartingFrom' in step.keys():\n step['Repeat'] = {}\n step['Repeat']['StartingFrom'] = step['StartingFrom']\n step['Repeat']['Count'] = step['Count']\n step.pop('StartingFrom')\n step.pop('Count')\n pprint.sorted = lambda x, key=None: x\n text: str = pprint.pformat(prepare, indent=0)\n text = text.replace(r\"'\", \"\")\n text = text[1:-1]\n f = open(filename, \"w\", encoding='utf-8')\n f.write(text)", "def to_json(self, fname):\n fname = enforce_extension(fname, \".json\")\n write_json(self.as_dict, fname)", "def to_bin_json(self, json_filename):\n def rel_to_bin(r):\n \"\"\"@param ProductType.Relation r: relation\"\"\"\n assert r.rel_type in ProductTypeDict.REL_MAPPING\n rel_attr = '%0.2f' % r.rel_attr if isinstance(r.rel_attr, (float, int)) else to_str(r.rel_attr) or ''\n return '%s%s%s' % ('+'.join(map(str, r.to_type.get_terms_ids())), ProductTypeDict.REL_MAPPING[r.rel_type], rel_attr)\n\n types = self._get_json_repr_dict()\n bin_types = {\n '+'.join(map(str, p_type.get_terms_ids())): [rel_to_bin(r) for r in rels[1:]]\n for p_type, rels in types.viewitems()\n }\n\n # Term dictionary checksum to validate correctness of term_ids in output file during load\n dawg_checksum = TypeTerm.term_dict.dawg_checksum()\n with open(json_filename, 'wb') as f:\n f.truncate()\n f.write(ujson.dumps([{ProductTypeDict.DAWG_CHECKSUM_ATTR: dawg_checksum}, 
bin_types], ensure_ascii=False))\n\n if self.VERBOSE:\n rel_count = sum(map(len, bin_types.values()))\n print(\"Dumped binary json of %d type tuples with %d relations to %s\" % (len(bin_types), rel_count, json_filename))", "def to_file(self, file_path: path_t):\n # ensure pathlib\n file_path = Path(file_path)\n _assert_file_extension(file_path, \".json\")\n\n to_export = [\"name\", \"structure\", \"test_times\"]\n json_dict = {key: self.__dict__[key] for key in to_export}\n with file_path.open(mode=\"w+\", encoding=\"utf-8\") as fp:\n json.dump(json_dict, fp)", "def to_json(self, json_filename):\n types = self._get_json_repr_dict()\n text_types = OrderedDict((to_str(p_type), map(to_str, rels)) for p_type, rels in types.viewitems())\n\n with open(json_filename, 'wb') as f:\n f.truncate()\n f.write(json.dumps(text_types, indent=4, ensure_ascii=False).encode('utf-8'))\n\n \"\"\"\n pd_hdiet = ProductTypeDict()\n pd_hdiet.min_meaningful_type_capacity = 1\n pd_hdiet.from_json(build_path(ensure_baseline_dir(), 'product_types_hdiet.json', None), dont_change=True)\n hdiet_types = pd_hdiet.get_type_tuples()\n\n with open(json_filename + '.~', 'wb') as f:\n f.truncate()\n f.write('\\r\\n'.join(to_str(t).encode('utf-8') + ('[hdiet]' if t in hdiet_types else '')\n for t, v in sorted(self.get_type_tuples(meaningful_only=True).viewitems(), key=lambda _t: to_str(_t[0]))))\n \"\"\"\n if self.VERBOSE:\n rel_count = sum(map(len, text_types.values())) - len(text_types) # subtract sqn count rows for each type\n print(\"Dumped json of %d type tuples with %d relations to %s\" % (len(text_types), rel_count, json_filename))", "def save(self, filename):\n with open(filename, \"w\") as f:\n m = {\n \"order\": self.order,\n \"pad\": self.pad,\n \"records\": {str(k): v for k, v in self.records.items()}\n }\n json.dump(m, f)", "def print_json(\n atoms, cell, filedesc=sys.stdout, title=\"\", cell_conv=1.0, atoms_conv=1.0\n):\n\n a, b, c, alpha, beta, gamma = mt.h2abc_deg(cell.h * cell_conv)\n\n natoms = atoms.natoms\n # direct access to avoid unnecessary slow-down\n qs = dstrip(atoms.q) * atoms_conv\n lab = dstrip(atoms.names)\n\n data = {}\n data[\"natoms\"] = natoms\n data[\"cell\"] = [a, b, c, alpha, beta, gamma]\n data[\"title\"] = title\n data[\"q\"] = qs.tolist()\n data[\"labels\"] = lab.tolist()\n\n filedesc.write(json.dumps(data))\n filedesc.write(\" \\n\")", "def save_reaction(self, filename):\n filename = filename.replace('.pkl', '.gpkl')\n filename = filename.replace('.bpkl', '.gpkl')\n # Overwrites any existing file.\n with gzip.GzipFile(filename, 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "def write_output(version, aliases, zones, filename):\n data = OrderedDict()\n data[\"version\"] = version\n data[\"aliases\"] = OrderedDict(sorted(aliases.items()))\n data[\"zones\"] = OrderedDict(sorted(zones.items()))\n\n with open(filename, \"w\") as jsonfile:\n json.dump(data, jsonfile, indent=2, separators=(\",\", \": \"))\n jsonfile.write(\"\\n\")", "def export_json(path):\n export_db(path)", "def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)", "def save_recognition_file(self, recognition_json):\n save_path = os.path.join(self.session_dir, '{}.{}'.format('recognition', 'json'))\n with open(save_path, 'w') as out_file:\n out_file.write(recognition_json)", "def dict2file(dict, filename, foldername):\n if foldername:\n if not os.path.exists(\"../Created_QD/\" + foldername):\n os.makedirs(\"../Created_QD/\" + 
foldername)\n file = open(\"../Created_QD/\" + foldername + \"/\" + filename + \".xyz\", \"w\")\n else:\n file = open(\"../Created_QD/\" + filename + \".xyz\", \"w\")\n file.write(\" \\n\\n\")\n for atom, values in dict.items():\n file.write(values['element'] + \"\\t\" + str(values['coor'][0]) + \"\\t\\t\" +\n str(values['coor'][1]) + \"\\t\\t\" + str(values['coor'][2]) + \"\\n\")\n file.seek(0)\n file.write(str(len(dict)))\n file.close()\n print(\"\\nQuantum Dot created :)\")", "def write(filename, parameters):\n with open(filename, \"w\") as f:\n json.dump(parameters, f, indent=4)", "def write_to_json(config: dict, filename: str):\n\n with open(filename, 'w', encoding='utf-8') as f:\n mmengine.dump(config, f, file_format='json')", "def output_data(data, filename):\n if filename:\n with open(filename, 'w') as f:\n json.dump(data, f, indent=4)\n else:\n print(json.dumps(table_data, indent=4))", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def save_hyp(hyperparameter, filepath):\n with open(filepath, 'w') as f:\n json.dump(hyperparameter, f)", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])", "def write_out():\n os.replace(\"recipes.json\", \".recipes.json.backup\")\n with open(\"recipes.json\", \"w\") as recipes_file:\n json.dump(recipebook.to_json_list(),recipes_file)", "def write_completed_dictionary_to_file(the_dict):\n\ttry:\n\t\toutputLocation = open('usable_dictionary.json','w')\n\t\toutputString = str(the_dict)\n\t\toutputLocation.write(outputString)\n\t\toutputLocation.close()\n\texcept IOError:\n\t\tprint (\"could not open file\")", "def produce(self, message=None, **kwargs):\n serialized_message = json_normalize(message)\n serialized_message.to_csv(\n self.config[\"FILE_PATH\"], mode=\"a+\", index=False, header=False\n )", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def metrics_to_json(pipeline_result, fname):\n metric_results = pipeline_result.metrics().query()\n results = {}\n for counter in metric_results['counters']:\n counter_name = counter.key.step + ':' + counter.key.metric.name\n results[counter_name] = counter.result\n with tf.io.gfile.GFile(fname, 'w') as f:\n f.write(json.dumps(results, indent=4, sort_keys=True))" ]
[ "0.6705845", "0.641551", "0.6144338", "0.6136017", "0.6124605", "0.612017", "0.61101353", "0.60968125", "0.60849845", "0.5977744", "0.59029186", "0.587066", "0.5857155", "0.5841988", "0.5835876", "0.5835178", "0.5819759", "0.5813063", "0.5810192", "0.57985073", "0.5744357", "0.57422674", "0.57321066", "0.57320017", "0.5696555", "0.56672543", "0.5659373", "0.56575745", "0.5654339", "0.5648293", "0.56417584", "0.5626967", "0.56186575", "0.5616393", "0.56136864", "0.5607801", "0.5604804", "0.5604804", "0.5604804", "0.55708146", "0.55601496", "0.5559851", "0.5537595", "0.55357414", "0.5526365", "0.55204624", "0.5518187", "0.5495465", "0.54946226", "0.54903615", "0.54813576", "0.5465955", "0.54619217", "0.5459109", "0.5459109", "0.5451196", "0.5443843", "0.5443813", "0.54365724", "0.54274225", "0.54271704", "0.5425007", "0.5423975", "0.54213256", "0.54178935", "0.5415762", "0.54105914", "0.5410544", "0.5408023", "0.5407029", "0.5403147", "0.5399434", "0.5392438", "0.53799534", "0.537972", "0.5374443", "0.5371289", "0.536913", "0.53676754", "0.5365542", "0.53647983", "0.53579396", "0.5356877", "0.5353809", "0.53434545", "0.53163207", "0.5310696", "0.5310209", "0.5305165", "0.52999353", "0.52988684", "0.5291882", "0.529016", "0.52886134", "0.5287531", "0.52870786", "0.5286903", "0.5282667", "0.52784157", "0.5269654" ]
0.8034322
0
return the probability of a given reaction
def get_probability(self, reaction): return self.__getitem__(reaction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total", "def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))", "def calculate_probability(self):\n return 0", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def p(self) -> Probability:\n ...", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def reconstructed_probability(self, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n pred = self.predict(x)\n recon_dist = Normal(pred['recon_mu'], pred['recon_sigma'])\n x = x.unsqueeze(0)\n p = recon_dist.log_prob(x).exp().mean(dim=0).mean(dim=-1) # vector of shape [batch_size]\n return p", "def get_response_probability(self, ind):\n return self.rp_t[ind]", "def get_response_probability(self, ind):\n pass", "def act(self, observation):\n self.t += 1\n\n probabilities = self.probabilities(observation)\n\n probabilities *= self.action_mask(observation[0])\n\n if probabilities.sum() == 0.0:\n probabilities += 1.0\n\n return torch.multinomial(probabilities, num_samples=1)[0]", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result", "def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]", "def getActionProb(self, canonicalBoard, temp=1):\n for i in range(self.args.numMCTSSims):\n dir_noise = (i == 0 and self.dirichlet_noise)\n self.search(canonicalBoard, dirichlet_noise=dir_noise)\n\n s = self.game.stringRepresentation(canonicalBoard)\n counts = [\n self.Nsa[(s, a)] if (s, a) in self.Nsa else 0\n for a in range(self.game.getActionSize())\n ]\n\n if temp == 0:\n bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()\n bestA = np.random.choice(bestAs)\n probs = [0] * len(counts)\n probs[bestA] = 1\n return probs\n\n counts = [x**(1. 
/ temp) for x in counts]\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n return probs", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def _pick_next_reaction(net, r0):\n\n propensities = []\n for reaction in net.reactions:\n try:\n div_result = reaction.rate(net.species) / r0\n except ZeroDivisionError:\n div_result = reaction.rate(net.species) / 1\n propensities.append(div_result)\n\n random_reaction = GillespieSimulator._pick_weighted_random(net.reactions, propensities)\n return random_reaction.change_vector(net.species)", "def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n assert step_rewards is not None\n probabilities = torch.exp(torch.tensor(step_rewards, dtype=self.dtype))\n probabilities = probabilities / torch.sum(probabilities)\n\n if action is not None:\n return probabilities[action]\n else:\n return probabilities", "def ProbCorrect(efficacy, difficulty, a=1):\n return 1 / (1 + math.exp(-a * (efficacy - difficulty)))", "def prob(self, w):\n return self.counts[w] / self.total_count", "def get_chance(x):\n e = math.exp(1)\n return (1.0 + e) / (1. + math.exp(x + 1))", "def reaction_rate (self):\n raise NotImplementedError('Subclass must implement this method')", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result", "def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def probabilities(self):\n raise NotImplementedError", "def prob(throw, n, d=6, type='classical'):\n count = 0\n table = throw_table(n, d, type)\n for t in table:\n if sum(t) == throw:\n count += 1\n \n return float(count)/len(table)", "def probability(self, left, right=None):\n return 1", "def get_action_probs(self, state):\n state = state.astype(np.float32)\n return self.session.run(self.action_probs,\n feed_dict={self.s_placeholder: state})", "def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count", "def get_probability(letters, n):\n return {l: c/n for l, c in letters.items()}", "def probability(p):\n return p > random.uniform(0.0, 1.0)", "def get_probability(self, combination):\n\n\t\tprob = 1\n\t\tfor i in np.arange(self.codelength):\n\t\t\tprob *= self.prior[combination[i]-1]\n\t\treturn prob", "def probability(self, token: str, follower: str) -> float:\n return self._cooccurrence_matrix.distribution(token).probability(follower)", "def probability(self, sequence):\n return 2 ** (self.log_probability(self._transform(sequence)))", "def chance(dice):\n return sum(dice)", "def confirmProbability(self, totalDice, bidCount):\n 
result = self.choose(totalDice, bidCount) * P**bidCount * (1 - P)**(totalDice-bidCount)\n return result", "def get_raw_probability(self):\n\t\tproba = RunOrder.BASE_SUCCESS_PROBABILITY\n\t\tproba += (self.additional_percents + self.hidden_percents) * 10\n\t\treturn proba", "def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total", "def probit(x):\n from tensorflow_probability import distributions\n return distributions.Normal(0, 1).cdf(x)", "def calculate_probability(disease, symptoms):\n nominator = disease.probability\n denominator = 0.0\n right = 1.0 - disease.probability\n for i in range(len(symptoms)):\n if symptoms[i] == SYMPTOM_PRESENT:\n nominator *= disease.present_probs[i]\n right *= disease.not_present_probs[i]\n elif symptoms[i] == SYMPTOM_NOT_PRESENT:\n nominator *= (1.0 - disease.present_probs[i])\n right *= (1.0 - disease.not_present_probs[i])\n denominator = right + nominator\n return round(nominator / denominator, 4)", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def get_action_probability_dict(self, state):\n pass", "def prob(self):\n\t\treturn self._prob", "def bidProbability(self, totalDice, bidCount):\n result = sum([self.confirmProbability(totalDice, i) for i in range(bidCount, totalDice + 1)])\n return result", "def _get_reward(self, action):\n HIRE_COST = 1 # TODO 7/29/20 - Determine significance of this value\n\n # Lookup the state representation using the cur_state index. Then we\n # can get the candidate productivity score.\n obs = self.observation_function[self.cur_state]\n prod_score = obs[1]\n r = action*(prod_score - HIRE_COST)\n return r", "def generate_reaction_probabilities(fasta_file, template_model_file, genome_id=None):\n if genome_id is None:\n # Use fasta_file name minus extension. worker uses only for file names and logging\n genome_id = '.'.join(fasta_file.split('.')[0:-1])\n # Create a worker for running the algorithm.\n worker = ProbAnnotationWorker(genome_id)\n try:\n template_model = _load_template_file(template_model_file)\n\n # Run blast using the fasta file.\n blast_result_file = worker.runBlast(fasta_file)\n\n # Calculate roleset probabilities.\n rolestring_tuples = worker.rolesetProbabilitiesMarble(blast_result_file)\n\n # Calculate per-gene role probabilities.\n role_probs = worker.rolesetProbabilitiesToRoleProbabilities(rolestring_tuples)\n\n # Calculate whole cell role probabilities.\n total_role_probs = worker.totalRoleProbabilities(role_probs)\n\n # Calculate complex probabilities.\n complex_probs = worker.complexProbabilities(total_role_probs, complexesToRequiredRoles=_complex_to_roles_dict(template_model))\n\n # Calculate reaction probabilities.\n rxn_probs = worker.reactionProbabilities(complex_probs, rxnsToComplexes=_reactions_to_complexes_dict(template_model))\n\n # Store in dictionary for better serialization\n return ReactionProbabilities([{'reaction': r[0], 'probability': r[1], 'type': r[2], 'complexes': _deserialize_cplx(r[3], worker.config['separator']), 'gpr': r[4]} for r in rxn_probs])\n finally:\n worker.cleanup() # worker creates lots of temporary and intermediate files. 
Allow it to clean up", "def prob_distr(self, x):\n return 1.0/x", "def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n if action is not None:\n return torch.tensor(1.0 / len(legal_actions), dtype=self.dtype)\n else:\n return 1.0 / len(legal_actions) * torch.ones(len(legal_actions), dtype=self.dtype)", "def loss_probability(reliability, k, chunk_count):\n return (factorial(chunk_count)/(factorial(chunk_count-k)*factorial(k))\n * pow(1 - reliability,k)*pow(reliability,chunk_count-k))", "def get_action_prob(self, game, probabilistic=True):\n for _ in range(Config.numMCTSSims):\n self.search(game)\n\n state = game.string_representation()\n counts = [\n self.Nsa.get((state, action), 0) for action in range(game.get_action_size())\n ]\n\n if probabilistic:\n if sum(counts) != 0:\n return [x / sum(counts) for x in counts]\n # TODO: understand this case (no valid actions)\n\n probs = [0] * len(counts)\n probs[np.argmax(counts)] = 1\n return probs", "def probability(series, params):\n\n prob = 1\n\n for result in series:\n\n prob *= params[result]\n\n return prob * params[\"die\"]", "def get_probability_loss(self):\n return sum(self._loss)/len(self._loss)", "def probabilities_score(model_id, test_set_id, rubric_id):\n result = {'true_average_probability': 0, 'false_average_probability': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_probability(model_id, test_set_id, rubric_id)\n\n true_number = 0\n true_probability = 0\n false_number = 0\n false_probability = 0\n\n for key in rubrication_result:\n if answers[key]:\n true_number += 1\n true_probability += rubrication_result[key]\n else:\n false_number +=1\n false_probability += rubrication_result[key]\n\n if true_number:\n result['true_average_probability'] = true_probability / true_number\n\n if false_number:\n result['false_average_probability'] = false_probability / false_number\n\n return result", "def evaluate(self, state, player, random_state):\n result = 0.0\n for _ in range(self.n_rollouts):\n working_state = state.clone()\n while not working_state.is_terminal():\n if working_state.is_chance_node():\n outcomes = working_state.chance_outcomes()\n action_list, prob_list = zip(*outcomes)\n action = random_state.choice(action_list, p=prob_list)\n else:\n action = random_state.choice(working_state.legal_actions())\n working_state.apply_action(action)\n result += working_state.player_return(player)\n\n return result / self.n_rollouts", "def evaluate_one(self, x):\n # p = 1. / (np.sqrt(2. 
* np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - x) * self.invvar * (self.mean - x))\n p = self.dist.probability(x)\n return p", "def pred(self, w):\n pr = 0;\n res = ''\n for item in self.counts:\n if w in item[:-1] and self.prob(item) > pr:\n# print(\"HIT\")\n# print(item)\n i = item.index(w) + len(w)\n res = item[i]\n pr = self.prob(item)\n if res == '':\n res = '*'\n return res", "def probability(prob):\n return random.random() <= prob", "def get_orbit_probability_mc(self, orbit: list, samples: int = 1000):\n prob = 0\n for _ in range(samples):\n sample = sfs.orbit_to_sample(orbit, self.mode_count)\n prob += self.get_probability(sample)\n\n prob = prob * sfs.orbit_cardinality(orbit, self.mode_count) / samples\n\n return prob", "def distribution_probability(self, game):\n dist_probability = {}\n\n total_visits = sum(self.root.n_a.values())\n\n for action, visits in self.root.n_a.items():\n dist_probability[action] = visits/total_visits\n return dist_probability", "def moment(self, p):\n return sum([val**p for val in self.counts.values()])", "def probability(self, samples):\n pass", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def prob(self, e):\n\t\treturn self.enumerate_all(self.variables, e)", "def policy_gamble (self):\n\t\tidx = self.idx \t\t\t\t# internal time index of state\n\t\tprobs = self.probs\t\t\t# prob of reward for an action\n\t\tbeta = self.beta\t\t\t# inverse temp \n\n\t\t# softmax\n\t\tAct = beta*self.Q[idx]\n\t\tp = 1./(1. + np.exp(-Act))\t# probability of gamble\n\t\tself.SM[idx] = p\n\n\t\t# decide whether to take gamble based on p\n\t\trnd = np.random.random_sample()\n\t\tif rnd < p:\n\t\t\tC = 1\t# gamble\n\t\telse:\n\t\t\tC = 0\t# no gamble\n\t\tself.C[idx] = C\n\n\t\t# no gamble\n\t\tif C == 0:\t\n\t\t\treward = 0\t\t # gamble reward encoded relative to reward\n\t\t\tself.R[idx] = -1 # rewarded sure thing, coded as -1\n\t\t\tself.PE[idx] = 0 # no PE, get the thing you expected\n\t\t# gamble\n\t\telse:\n\t\t\t# decide whether a reward is delivered\n\t\t\treward = np.random.binomial(size=1, n=1, p=probs)[0]\n\t\t\tself.R[idx] = reward # indicator that reward was received\n\t\t\tif reward == 0:\n\t\t\t\treward = self.l_mag\n\t\t\telse:\n\t\t\t\treward = self.r_mag\n\t\t\tself.PE[idx] = reward - self.Q[idx]", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)", "def calculate_word_probabilities(word):\n\n\tprobabilities = {\"one\":0,\"two\":0,\"three\":0,\"four\":0,\"five\":0}\n\n\tfor star in range(1,6):\n\t\tconditional = 
float(word[number_to_text[star]])/statements_with_star[star]\n\t\tprobabilities[number_to_text[star]]=conditional*10\n\n\tdb.words.update({\"_id\":ObjectId(word[\"_id\"])},{\"$set\":{\"conditionals\":probabilities}})\n\n\treturn 1", "def prob_given(self, posterior, prior):\n\t # print \"posterior, prior\", posterior, prior\n\t return self.prob(merge(prior, posterior)) / self.prob(prior) if self.prob(prior) else 0", "def proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = choices(CHOICES, weights=freqs)[0]\n return CHOICES[(prediction_for_them + 1) % 3]", "def get_transition_prob(self, state, action, next_state):\n return self.get_next_states(state, action).get(next_state, 0.0)", "def word_probability(self, word: str) -> int:\n try:\n return self.fdist[word.lower()] / len(self.fdist.keys())\n except KeyError:\n return 0.0", "def getReaction(self):\n return _libsbml.FluxObjective_getReaction(self)", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def act_priority(self, decision, choice):\n if choice is None: return 0\n return (100*choice.actions + 10*(choice.coins + choice.cards) +\n choice.buys) + 1", "def get_probability(self, sentence):\n if len(sentence) == 1:\n return Decimal(10) ** self.get_unigram_log_prob(sentence)\n elif len(sentence) == 2:\n return Decimal(10) ** self.get_bigram_log_prob(sentence)\n else:\n log_prob = Decimal(0.0)\n for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]):\n log_prob += self.get_trigram_log_prob((w1, w2, w3))\n log_prob = Decimal(log_prob)\n return Decimal(10) ** log_prob", "def policy(self, state):\n maskState = self.discretizeState(state)\n\n qValues = self._q[maskState]\n qAction0 = qValues[0]\n qAction1 = qValues[1]\n\n if qAction0 == qAction1:\n return random.randint(0, 1)\n if qAction0 > qAction1:\n return 0\n return 1", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def _get_reward(self):\n if self.status():\n return self.current_step/self.ep_length # the reward is proportional to the duration \n else:\n return 0", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def chance(c: float) -> bool:\n return c > r()", "def probability(self, wires=None):\r\n wires = wires or self.wires\r\n # convert to a wires object\r\n wires = Wires(wires)\r\n # translate to wires used by device\r\n device_wires = self.map_wires(wires)\r\n\r\n N = len(wires)\r\n cutoff = getattr(self, \"cutoff\", 10)\r\n\r\n if N == self.state.num_modes:\r\n # probabilities of the entire system\r\n probs = tf.reshape(self.state.all_fock_probs(cutoff=cutoff), 
-1)\r\n\r\n else:\r\n rdm = self.state.reduced_dm(modes=device_wires.tolist())\r\n new_state = FockStateTF(rdm, N, pure=False, cutoff_dim=cutoff)\r\n probs = tf.reshape(new_state.all_fock_probs(cutoff=cutoff), -1)\r\n\r\n ind = np.indices([cutoff] * N).reshape(N, -1).T\r\n probs = OrderedDict((tuple(k), v) for k, v in zip(ind, probs))\r\n return probs", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def get_event_probability_mc(self, event: tuple, samples: int = 1000):\n photons, max_photons_per_mode = event\n\n prob = 0\n for _ in range(samples):\n sample = sfs.event_to_sample(photons, max_photons_per_mode, self.mode_count)\n prob += self.get_probability(sample)\n\n prob = prob * sfs.event_cardinality(photons, max_photons_per_mode, self.mode_count) / samples\n\n return prob", "def _reward(self):\n\n return 1-self.step_count/ self.max_steps", "def _R(state, effects, observed_frequencies):\n # this small number ~ 10^-304 is added so that we don't get divide by zero errors\n machine_eps = np.finfo(float).tiny\n # have a zero in the numerator, we can fix this is we look a little more carefully.\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n update_operator = sum([effect * observed_frequencies[i] / (predicted_probs[i] + machine_eps)\n for i, effect in enumerate(effects)])\n return update_operator", "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def getReaction(self):\n return _libsbml.FluxBound_getReaction(self)", "def counter_reactionary(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n elif our_hist[-1] == CHOICES[(their_hist[-1] + 1) % 3]:\n return CHOICES[(our_hist[-1] + 1) % 3]\n elif our_hist[-1] == CHOICES[(their_hist[-1] + 2) % 3]:\n return CHOICES[(our_hist[-1] + 2) % 3]", "def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n omega = self.alpha / (doc_length + self.alpha)\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-omega) * p1 + omega * p2", "def probability(distances):\n v = [1.0/(d + 1) for d in distances]\n s = sum(v)\n return [i/s for i in v]", "def calculate_class_apriori_probability(self, class_name):\n unique, counts = np.unique(self.class_data, return_counts=True)\n frequency_dict = dict(zip(unique, counts))\n return float(frequency_dict[class_name])/len(self.class_data)", "def prob(self, event_details):\n color_count_probs = event_details.color_count_probs(self.prob_tier)\n return color_count_probs.loc[\n (\n color_count_probs.index >= tuple(self.stone_summons.values)\n ).all(axis=1)\n & (\n color_count_probs.index * tuple(1 - self.stone_presences.values)\n <= tuple(self.stone_summons.values)\n ).all(axis=1)\n ].sum()", "def getReward(self, state):\n return (state in self.reward_set) * 1", "def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-self.omega) * p1 + self.omega * p2", "def 
sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def discounted_reward(self, discount):\n\n tl = len(self)\n return (1 - discount) * np.sum(discount ** np.arange(tl) * self.rewards)", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes" ]
[ "0.6590192", "0.6571907", "0.647744", "0.64491594", "0.6418756", "0.63888365", "0.63714635", "0.6371433", "0.6367964", "0.63313365", "0.6296423", "0.62912256", "0.62699294", "0.62629133", "0.62573755", "0.6252374", "0.62146765", "0.6207749", "0.62025577", "0.6187049", "0.61826676", "0.61214125", "0.61020696", "0.60916483", "0.6077021", "0.6066374", "0.6042399", "0.6018653", "0.6007051", "0.5999446", "0.5999275", "0.597632", "0.59749377", "0.5933652", "0.59157157", "0.591266", "0.5905664", "0.59017605", "0.5899429", "0.5893863", "0.588887", "0.5878651", "0.58763534", "0.58738977", "0.5873091", "0.5862018", "0.5858725", "0.5843253", "0.5822454", "0.58119476", "0.57885504", "0.5786693", "0.578483", "0.57620126", "0.5750336", "0.5744607", "0.57388705", "0.5730414", "0.572359", "0.57167524", "0.5691816", "0.5680768", "0.5666207", "0.56565386", "0.56491846", "0.5648214", "0.56474924", "0.56399316", "0.56366765", "0.563551", "0.5634639", "0.5624373", "0.56155396", "0.560904", "0.56060624", "0.56001216", "0.5599643", "0.5598889", "0.5592355", "0.55734015", "0.5570895", "0.55691415", "0.556069", "0.5560353", "0.5548069", "0.5544259", "0.5527412", "0.5521267", "0.55188096", "0.55181", "0.55174464", "0.5516816", "0.55124456", "0.5509545", "0.55093426", "0.55053425", "0.5493026", "0.5492314", "0.5492289", "0.5491858" ]
0.8662731
0
Serializes this object as a JSON string
def to_json_file(self, path): with open(path, 'w') as f: f.write(self.to_json())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self):\n return json.dumps(self.as_dict())", "def _toJSON(self):\n\n return json.encode(self.__toJSON())", "def to_json(self) -> str:\n return json.dumps(asdict(self))", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.serialize())", "def serialize(self) -> str:\n return json.dumps(self.__dict__)", "def to_json(self):\n return json.dumps(self.for_json())", "def to_json(self):\n pass", "def to_json(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.__dict__)", "def to_json(self) -> JSON:\n pass", "def toJSON(self):\n raise NotImplementedError()", "def tojson(self):\n return json.dumps(self.jsonable())", "def tojson(self) -> ty.Text:\n return json.dumps(self.todict())", "def toJson(self):\n return json.dumps(self.toDict())", "def toJson(self):\n return json.dumps(self.toDict())", "def to_json(self, **kwargs):\n return dumps(self, **kwargs)", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def to_json(self):\n return json.dumps(self.dict)", "def serialize(self, obj):\n return json.dumps(obj)", "def to_json(self, *args, **kwargs):\n data = self.to_dict()\n\n return json_util.dumps(data)", "def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def toJson(self):\n return json.dumps(self.toDict(), default=str)", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(), *args, **kwargs)", "def to_string(self):\n return json.dumps(self.to_json(), cls=ObjectEncoder)", "def to_json(self):\n return json.dumps(self._asdict())", "def to_json(self) -> str:\n return JSONEncoder().encode(vars(self))", "def to_json(self) :\n return jsonpickle.encode(self)", "def to_json(self) -> str:\n return json.dumps(model_to_dict(self))", "def as_json(self):", "def to_json_string(self) -> None:\n return json.dumps(self.to_dict(), indent=2) + \"\\n\"", "def to_json(self):\n return json.dumps(self, default=json_converter, indent=2)", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def to_json_string(self):\n return json.dumps(dict(self), indent=2, sort_keys=True) + \"\\n\"", "def asJsonString(self):\n return json.dumps(self.asDict(), sort_keys=True)", "def serialize(self):\r\n return json.dumps({\r\n 'display_name': self.display_name,\r\n 'data': self.data,\r\n 'metadata': self.metadata,\r\n 'graderType': self.grader_type,\r\n 'publish': self.publish\r\n })", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) 
-> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def as_json_string(self):\n return json.dumps(self.as_dict(), sort_keys=True)", "def AsJsonString(self):\n return json.dumps(self.AsDict(), sort_keys=True)", "def jsonify(self):\n jsonObject = self.getJsonObject()\n return json.dumps(jsonObject)", "def to_json(self):\n return None", "def json(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)", "def as_json(self) -> str:\n return json.dumps(self, cls=_RecordingJSONEncoder)", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(primitive=True), *args, **kwargs)", "def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__)", "def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)", "def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)", "def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def toJSON(cls, obj):\n return json.dumps(obj)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def as_json(self) -> str:\n return json.dumps(self, cls=_ProgrammeJSONEncoder)", "def to_string(self) -> str:\n return json.dumps(self.to_json())", "def to_string(self) -> str:\n return json.dumps(self.to_json())", "def to_json(self):\n return json.dumps(self.to_dict(), sort_keys=True)", 
"def json(self):\n class ExtendedJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):\n encoded_object = obj.isoformat()\n else:\n encoded_object = json.JSONEncoder.default(self, obj)\n return encoded_object\n\n obj = {\n 'operation': self.operation,\n 'version': self.version,\n 'language': self.language,\n 'identifiers': self.identifiers,\n 'store_execute': self.store_execute,\n 'status': self.status,\n 'lineage': self.lineage,\n 'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),\n 'outputs': self.outputs,\n 'raw': self.raw\n }\n\n return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)", "def toJson(self):\r\n return self.__dict__", "def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True,\n indent=4)", "def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True,\n indent=4)", "def to_json(self):\n return json.dumps(self, default=lambda i: i.__dict__)", "def to_json(self):\n return json.dumps(self.to_dict(), sort_keys=False, indent=2)", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"" ]
[ "0.85692286", "0.85322154", "0.84612757", "0.84603655", "0.84603655", "0.84603655", "0.84603655", "0.84603655", "0.84603655", "0.84603655", "0.84603655", "0.84603655", "0.84569603", "0.8437394", "0.84211195", "0.8399535", "0.83540624", "0.83540624", "0.83424884", "0.83178025", "0.83153594", "0.8298456", "0.82581717", "0.82514024", "0.82514024", "0.82220685", "0.82131344", "0.82131344", "0.82131344", "0.813802", "0.81354755", "0.81345075", "0.8133272", "0.81322134", "0.81284964", "0.8100331", "0.80918556", "0.8087554", "0.8071716", "0.8035616", "0.8031932", "0.8030654", "0.80267495", "0.80257463", "0.8018256", "0.8018256", "0.8010255", "0.80037236", "0.79901546", "0.7983796", "0.79816455", "0.79816455", "0.79816455", "0.7973204", "0.79686064", "0.79650265", "0.79502094", "0.79500836", "0.79275566", "0.790006", "0.790006", "0.790006", "0.790006", "0.790006", "0.790006", "0.790006", "0.790006", "0.790006", "0.790006", "0.78924865", "0.7892211", "0.7877318", "0.7877318", "0.7877318", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78753006", "0.78571296", "0.7856761", "0.7851152", "0.7851152", "0.7850761", "0.7844007", "0.7844007", "0.78362733", "0.7832441", "0.7832306", "0.7831514", "0.7831514", "0.7821222", "0.7790022", "0.77894765", "0.77894765", "0.77894765" ]
0.0
-1
Deserialize a ReactionProbabilities from a JSON file
def from_json_file(path): with open(path, 'r') as f: return ReactionProbabilities.from_json(f.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_priors(file_name):\n with open(file_name, \"r\") as fp:\n priors = json.load(fp)\n return priors", "def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data", "def load_priors(self, json_file):\n\n with open(json_file, 'r') as jf:\n self.priors_dict = json.load(jf)", "def load_reseq_conditions_from(json_file_or_dict):\n\n # refactor that common useage from TC io\n if isinstance(json_file_or_dict, dict):\n d = json_file_or_dict\n else:\n with open(json_file_or_dict, 'r') as f:\n d = json.loads(f.read())\n\n return ReseqConditions.from_dict(d)", "def load_raw_annot(filename):\n with open(filename, 'r') as fp:\n data = json.loads(fp.read())\n\n mapping = _create_mapping()\n\n for k in data.keys():\n for i in xrange(len(data[k])):\n data[k][i] = eval_utils.revise_sentiment(data[k][i], mapping)\n return data", "def from_json(cls, file_path):\n profile = cls()\n with open(file_path, 'r') as fd:\n profile._ngrams = json.load(fd)\n return profile", "def from_json(cls, file):\n ref = os.path.basename(file)\n with open(file, 'r') as fp:\n j = json.load(fp)\n\n return sorted([cls.from_dict(ref, d) for d in j[ref]], key=lambda x: x.priority)", "def _load(predictions, f):\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction", "def __init__(self, file):\n with open(file, 'r') as f:\n self.vocab = json.loads(f.read())", "def read_proto(filename):\n results = results_pb2.ImprovementResults()\n with open(filename, 'rb') as f:\n results.ParseFromString(f.read())\n return results", "def __load_class_representation(self, filename):\n\n # Reads in the reverse dictionary from the given file.\n with open(filename) as file:\n return json.load(file)", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json(cls, fname):\n d = read_json(fname)\n return cls.from_dict(d)", "def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))", "def deserialize(cls, json_):\n schema = movers_schema.CatsMover()\n if 'tide' in json_:\n schema.add(Tide())\n _to_dict = schema.deserialize(json_)\n\n return _to_dict", "def json2class(filename = \"classifier\"):\n json_file = open(filename + \".json\", 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # Load weights into new model\n loaded_model.load_weights(filename + \".h5\")\n print(\"Loaded model from disk.\")\n return loaded_model", "def from_JSON(cls, filename):\n with open(os.path.expanduser(filename), encoding='utf-8') as f:\n return json.load(f, object_hook=class_hook)", "def from_file(filename):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n f = open(filename, 'r')\n string = f.read()\n return json.loads(string)", "def load(cls, path: str) -> 'Vocab':\n with open(path, 'r', encoding='utf-8') as f:\n return cls.from_json(f.read())", "def load_personality_adj():\n return json.load(open(personality_adj()))", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def load (cls, file):\n with open(file) as f:\n raw = json.load(f)\n obj = 
PasswordSetCharacteristics()\n obj.lengths = cls.to_num_dict(raw['lengths'])\n obj.lower_counts = cls.to_num_dict(raw['lowerCounts'])\n obj.upper_counts = cls.to_num_dict(raw['upperCounts'])\n obj.digit_counts = cls.to_num_dict(raw['digitCounts'])\n obj.symbol_counts = cls.to_num_dict(raw['symbolCounts'])\n obj.class_counts = cls.to_num_dict(raw['classCounts'])\n obj.word_counts = cls.to_num_dict(raw['wordCounts'])\n return obj", "def read_json():\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n \n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()", "def __init__(self, recipie_file):\n try:\n with open(recipie_file) as json_file:\n self.recipie = json.load(json_file)\n except IOError as io_error:\n raise IOError('File not found: {}'.format(io_error.filename))\n\n try:\n self.ing_pop = self.recipie[MMK.KEY_ING_PROP]\n self.steps = self.recipie[MMK.KEY_STEPS]\n self.pan = self.recipie[MMK.KEY_PAN]\n except KeyError as key_err:\n raise KeyError('{} not found in recipie'\n .format(key_err))", "def load_predictions(fileobj):\n\n def _load(predictions, f):\n \"\"\"Read serialized json from `f`, create examples, and add to `examples`.\"\"\"\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction\n\n predictions = {}\n _load(predictions, fileobj)\n\n return predictions", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def read_json():\n with open(\"Ratings.json\") as json_data:\n json_list = []\n for line in json_data:\n json_dict = json.loads(line)\n json_list.append(json_dict)\n return json_list", "def read_from_file():\n global REPOS\n with file(OUTPUT, 'r') as infile:\n REPOS = json.loads(infile.read())", "def from_json_file(cls, json_file):\n with open(json_file, \"r\") as reader:\n return cls.from_dict(json.load(reader))", "def load_data(path_stats, path_rules):\n with open(path_stats) as json_file:\n material_probs = json.load(json_file)\n with open(path_rules) as json_file:\n convertion_rules = json.load(json_file)\n\n return material_probs, convertion_rules", "def load(path: str):\n lprint(f\"Loading regressor from: {path}\")\n with open(join(path, \"regressor.json\"), \"r\") as json_file:\n frozen = json_file.read()\n\n thawed = jsonpickle.decode(frozen)\n\n thawed.models = []\n for i in range(thawed.num_channels):\n channel_path = join(path, f\"channel{i}\")\n lprint(f\"Loading regressor model for channel {i} from: {path}\")\n with open(join(channel_path, \"regressor_model.json\"), \"r\") as json_file:\n frozen_model = json_file.read()\n\n thawed_model = jsonpickle.decode(frozen_model)\n thawed_model._load_internals(channel_path)\n thawed.models.append(thawed_model)\n\n return thawed", "def _parse_jsonl_vocab_file(self, vocab_file: str):\n with open(vocab_file, \"r\") as f:\n entities_json = [json.loads(line) for line in f]\n\n for item in entities_json:\n for title, language in item[\"entities\"]:\n entity = Entity(title, language)\n self.vocab[entity] = item[\"id\"]\n self.counter[entity] = item[\"count\"]\n self.inv_vocab[item[\"id\"]].append(entity)", "def read_json_file(self, fname):\n return {\n 'maven': {\n 'pck1, pck2, pck3': 5,\n 'pck3, pck56': 20,\n 'pck2, pck4, 
pck7': 10\n },\n 'npm': {\n 'pck1, pck2, pck3': 22,\n 'pck2, pck4, pck7': 89\n },\n 'pypi': {\n 'pck3, pck56': 65,\n 'pck2, pck4, pck7': 110\n }\n }", "def from_file(cls, file_name):\n\n with open(file_name, 'r') as fi:\n the_dict = json.load(fi)\n return cls.from_dict(the_dict)", "def generate_reaction_probabilities(fasta_file, template_model_file, genome_id=None):\n if genome_id is None:\n # Use fasta_file name minus extension. worker uses only for file names and logging\n genome_id = '.'.join(fasta_file.split('.')[0:-1])\n # Create a worker for running the algorithm.\n worker = ProbAnnotationWorker(genome_id)\n try:\n template_model = _load_template_file(template_model_file)\n\n # Run blast using the fasta file.\n blast_result_file = worker.runBlast(fasta_file)\n\n # Calculate roleset probabilities.\n rolestring_tuples = worker.rolesetProbabilitiesMarble(blast_result_file)\n\n # Calculate per-gene role probabilities.\n role_probs = worker.rolesetProbabilitiesToRoleProbabilities(rolestring_tuples)\n\n # Calculate whole cell role probabilities.\n total_role_probs = worker.totalRoleProbabilities(role_probs)\n\n # Calculate complex probabilities.\n complex_probs = worker.complexProbabilities(total_role_probs, complexesToRequiredRoles=_complex_to_roles_dict(template_model))\n\n # Calculate reaction probabilities.\n rxn_probs = worker.reactionProbabilities(complex_probs, rxnsToComplexes=_reactions_to_complexes_dict(template_model))\n\n # Store in dictionary for better serialization\n return ReactionProbabilities([{'reaction': r[0], 'probability': r[1], 'type': r[2], 'complexes': _deserialize_cplx(r[3], worker.config['separator']), 'gpr': r[4]} for r in rxn_probs])\n finally:\n worker.cleanup() # worker creates lots of temporary and intermediate files. 
Allow it to clean up", "def load_rentals_file(filename):\n logging.debug('Loading rental file %s', filename)\n try:\n with open(filename) as file:\n try:\n data = json.load(file)\n except ValueError:\n logging.error('File %s cannot be read as JSON', filename)\n exit(0)\n except IOError:\n logging.error('File %s cannot be read (does not exist?)', filename)\n exit(0)\n logging.debug('Successfully loaded rental file %s', filename)\n return data", "def read_json(cls, filename, **kwargs):\n # Open, json load, and close a json file\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n\n # Grab all properties from data-structure\n necessary_args = [\"wildtype\", \"genotypes\", \"phenotypes\"]\n options = {\n \"genotypes\": [],\n \"phenotypes\": [],\n \"wildtype\": [],\n \"stdeviations\": None,\n \"mutations\": None,\n \"n_replicates\": 1,\n }\n # Get all options for map and order them\n for key in options:\n # See if options are in json data\n try:\n options[key] = data[key]\n except KeyError:\n pass\n # Override any properties with manually entered kwargs passed directly\n # into method\n options.update(kwargs)\n args = []\n for arg in necessary_args:\n val = options.pop(arg)\n args.append(val)\n # Create an instance\n gpm = cls(args[0], args[1], args[2], **options)\n return gpm", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def from_file(cls, file_name: str):\n\n with open(file_name, 'r') as fi:\n input_dict = json.load(fi)\n return cls.from_dict(input_dict)", "def from_json(cls, filename):\n with open(filename, 'r') as f:\n loaded_data = json.load(f, object_pairs_hook=OrderedDict)\n return cls(data=loaded_data)", "def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # 
Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)", "def load_data(filename) :\n data_dict = None\n with open(filename, 'r') as infile :\n data_dict = json.load(infile)\n\n# Reset Counters\n for counter in data_dict['counters'] :\n data_dict['counters'][counter] = 0\n\n return data_dict", "def load(cls, copula_path):\n with open(copula_path) as f:\n copula_dict = json.load(f)\n\n return cls.from_dict(copula_dict)", "def read(fname):\n # Read string from JSON file.\n with open(fname, 'r') as fi:\n serial = fi.read()\n\n # Decode.\n decoder = json.JSONDecoder(object_hook=numpy_hook)\n data = decoder.decode(serial)\n\n return data", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\r\n with open(json_file, \"r\", encoding='utf-8') as reader:\r\n text = reader.read()\r\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def load():\n try:\n with open('learn.json', 'r') as file:\n return json.load(file)\n except IOError:\n return []", "def deserialize(self):\n with open(os.path.join(self.root_path, self._data_file), 'r') as file:\n data = json.load(file)\n for key, val in data.items():\n self.__dict__[key] = val", "def from_json_file(cls, json_file):\n with open(json_file, 'r', encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def read_json(fn):\n with open(fn) as f:\n return json.load(f, object_hook=_operator_object_hook)", "def load(filename):\n with open(filename, \"r\") as f:\n raw = json.load(f)\n\n mc = MarkovChain(\n raw[\"order\"],\n raw[\"pad\"],\n {eval(k): v for k, v in raw[\"records\"].items()}\n )\n\n return mc", "def get_throughput_json(filename):\n with open(filename) as json_data:\n d = json.load(json_data)\n json_data.close()\n return d", "def load_from_json_file(filename):\n with open(filename, encoding=\"utf-8\") as round:\n return json.load(round)", "def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())", "def _load_vulnerabilities_report_file(file_name):\n with open(os.path.join(module_path, test_name, file_name)) as file:\n json_data = json.load(file)\n return ImageVulnerabilitiesReport.from_json(json_data)", "def read_json(filename):\n with open(filename) as json_file:\n file = json.load(json_file)\n shape = file[\"NN_Shape\"]\n top_genes = file[\"Gene_List\"]\n weights = file[\"Weights\"]\n output_key = file[\"One-Hot_Encoded_Output\"]\n biases = file[\"Biases\"]\n return shape, top_genes, 
weights, output_key, biases", "def read_json(self, filename):\n with open(filename) as f:\n for line in f:\n pass\n lastWeight = json.loads(line)\n\n return lastWeight", "def vocab_from_json(path: str) -> Dict:\n with open(path, encoding=VOCAB_ENCODING) as inp:\n vocab = json.load(inp)\n logger.info('Vocabulary (%d words) loaded from \"%s\"', len(vocab), path)\n return vocab", "def from_json(cls, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n\n with open(path) as fp:\n params = json.load(fp)\n model = cls(params['elements'],\n params['r_cut'],\n params['gp']['sigma'],\n params['gp']['theta'],\n params['gp']['noise'])\n\n gp_filename = params['gp']['filename']\n try:\n model.gp.load(directory / gp_filename)\n except:\n warnings.warn(\"The many-body GP file is missing\")\n pass\n\n return model", "def load_stat(input):\n with open(input['json'], 'r', encoding=input['encoding']) as f:\n return json.load(f)", "def load_from_json(filename):\n\n with open(filename, 'r') as file:\n return json.load(file)", "def from_json(cls, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n\n with open(path) as fp:\n params = json.load(fp)\n model = cls(params['element'],\n params['r_cut'],\n params['gp']['sigma'],\n params['gp']['theta'],\n params['gp']['noise'])\n\n gp_filename = params['gp']['filename']\n try:\n model.gp.load(directory / gp_filename)\n except:\n warnings.warn(\"The many-body GP file is missing\")\n pass\n\n return model", "def load(cls, file):\n with open(file, \"r\") as f:\n j = json.load(f)\n return cls(**j)", "def load_circuit(filename):\n with open(filename, 'r') as f:\n data = json.load(f)\n \n return Circuit.from_dict(data)", "def export_json(rxn_probs, filename):\n with open(filename, 'w') as f:\n f.write(json.dumps(rxn_probs))\n return filename", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def from_json(fname):\n with open(fname, 'r') as fh:\n d = json.load(fh)\n return d", "def load_co_registration_data_from_json(filename: str) -> Dict[str, CoRegistrationData]:\n with open(filename, \"r\") as json_file:\n data = json.load(json_file)\n co_reg_data = {}\n for index, data in data.items():\n co_reg_data[index] = CoRegistrationData(\n name=str(data['name']),\n target_w=int(data['target_w']),\n target_h=int(data['target_h']),\n transform_matrix=np.array(data['transform_matrix']),\n moving_img_name=str(data['moving_img_name'])\n )\n return co_reg_data", "def extractProbabilities(filePath, C, gamma):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n \n # extract train data\n train = grouped.nth([0, 1])\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n \n # train our model\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma, probability=True),\n n_jobs=4)\n 
svc.fit(xTrain, yTrain)\n \n classes = svc.classes_\n for index, (_name, data, _emotion) in enumerate(processedJAFFE):\n proba = svc.predict_proba(data.reshape(1, -1))[0]\n processedJAFFE[index][1] = dict(zip(classes, proba))\n pprint(processedJAFFE[index])\n print(svc.predict(data.reshape(1, -1)))\n print('-'*50)\n newFilename = filename+'_probabilities_c_%s_gamma_%s'%(C, gamma)\n print('saving file:', '/data/%s'%newFilename)\n save('../data/probabilities/%s'%newFilename, processedJAFFE)", "def __init__(self, json_file: str):\n\n # Open JSON file and create attribute with JSON data\n with open(json_file, 'r') as f:\n self.file_name = f.name\n self.data = json.load(f)\n \n self.messages = []\n for msg in self.data:\n self.messages.append(Message(msg))", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n return(json.loads(f.read()))", "def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance", "def load_status(sec_status):\n with open(sec_status, 'r') as infile:\n return json.load(infile)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def load_from_json_file(filename):\n with open(filename, mode=\"r\", encoding=\"utf-8\") as a_file:\n return json.loads(a_file.read())", "def read_json(f):\n with open(f, \"r\") as fin:\n return json.loads(fin.read())", "def load_from_file(cls):\n filename = cls.__name__ + \".json\"\n listOfInst = []\n try:\n with open(filename, \"r\") as f:\n listOfInst = cls.from_json_string(f.read())\n for num, val in enumerate(listOfInst):\n listOfInst[num] = cls.create(**listOfInst[num])\n except:\n pass\n return listOfInst", "def load_data(filename):\n with open(filename, 'r') as f:\n d = json.load(f)\n\n return d['nicknames'], d['relations'], d['directed']", "def load_from_json_file(filename):\n with open(filename) as f:\n return json.load(f)", "def _read_json(filename):\n with open(filename) as f:\n import json\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, \"r\") as my_file:\n return json.loads(my_file.read())", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def load(filename):\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n net = MFoMNetwork(data[\"sizes\"])\n net.weights = [np.array(w) for w in data[\"weights\"]]\n net.biases = [np.array(b) for b in data[\"biases\"]]\n return net", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def load_from_file(cls):\n lis = []\n if not os.path.isfile(cls.__name__ + \".json\"):\n return lis\n with open(cls.__name__ + \".json\", encoding=\"utf-8\") as myFile:\n json_str = myFile.read()\n my_dict = cls.from_json_string(json_str)\n for inst in my_dict:\n lis.append(cls.create(**inst))\n return lis", "def parse():\n with open('src/wator/Properties.json') as data_file:\n data = json.load(data_file)\n return data", "def load_reveiws_dataset(filename):\n review_DataFrame = pd.read_json(filename, lines=True)\n return review_DataFrame", "def 
load_from_json_file(filename):\n with open(filename, 'r', encoding='utf8') as f:\n return json.load(f)", "def json_parsing():\n with open('countries.json') as f:\n countries = json.load(f)\n\n return countries", "def read_ARC_JSON(filepath):\n \n # Open the JSON file and load it \n data = json.load(open(filepath))\n\n # Extract the train/test input/output grids. Each grid will be a\n # list of lists of ints. We convert to Numpy.\n train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]\n train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]\n test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]\n test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]\n\n return (train_input, train_output, test_input, test_output)", "def get_rating_from_json(json):\r\n return PodiumRating(json[\"rating\"])", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def load_data_from_json(json_path):\r\n print(\"\\nLoading data from json file\")\r\n with open(json_path, \"r\") as fp:\r\n data = json.load(fp)\r\n \r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def make_prob_dictionary(to_read, probabilities):\n\tf = open(to_read)\n\tfor i in f:\n\t\tx = i.strip().split()\n\t\tprobabilities[x[0][:-1]] = float(x[1])\n\tf.close()\n\treturn probabilities" ]
[ "0.6095132", "0.60795057", "0.5984798", "0.59495234", "0.57160014", "0.56639713", "0.5619954", "0.5497838", "0.5492578", "0.53609985", "0.5356933", "0.53483236", "0.5318638", "0.53132015", "0.52867985", "0.52783847", "0.5249616", "0.52431166", "0.5227828", "0.5201468", "0.5199053", "0.51935226", "0.5189107", "0.5179041", "0.5149647", "0.51165074", "0.5102031", "0.5096326", "0.5090816", "0.5088783", "0.5086852", "0.5080622", "0.5073779", "0.5073296", "0.5060697", "0.5046928", "0.5046558", "0.50287247", "0.5026949", "0.50180686", "0.5016075", "0.50130355", "0.50026554", "0.5001216", "0.49980596", "0.4996402", "0.49890676", "0.49890676", "0.49890676", "0.4986758", "0.49859414", "0.49787596", "0.4969968", "0.4960375", "0.49570152", "0.4954941", "0.4952355", "0.49492177", "0.4943843", "0.49408382", "0.49369943", "0.49347153", "0.492824", "0.49263453", "0.492026", "0.49105242", "0.4908138", "0.490506", "0.48986977", "0.48953092", "0.4893569", "0.48904783", "0.48821586", "0.48724937", "0.48724937", "0.486996", "0.48421663", "0.48290804", "0.48269224", "0.4823376", "0.48148617", "0.48131433", "0.48109567", "0.48080242", "0.4801956", "0.4798304", "0.47931883", "0.47918406", "0.47878006", "0.47878006", "0.47793594", "0.47790843", "0.4776505", "0.47752303", "0.47720295", "0.4771424", "0.47654298", "0.47630885", "0.47619504", "0.47552955" ]
0.85232556
0
Updates the Reaction Probabilities
def update(self, rxn_probs): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prob_update(self):\n pass", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV", "def update(probabilities, one_gene, two_genes, have_trait, p):\n for person in probabilities:\n\n # count the genes for the person\n geneCount = 0\n if person in one_gene:\n geneCount = 1\n elif person in two_genes:\n geneCount = 2\n\n # check the trait\n hasTrait = False\n if person in have_trait:\n hasTrait = True\n\n # update the probabilities\n probabilities[person][\"gene\"][geneCount] += p\n probabilities[person][\"trait\"][hasTrait] += p", "def update(self):\n\n # get states, actions, rewards and total timesteps from memory\n states, actions, R, T = self.memory.get()\n n_ep = len(R)\n\n # compute value estimates for the states\n v = self.critic(states)\n\n # compute advantages (using GAE) and rewards to go\n A, rtg = utils.gae_rtg((R, v, T), self.gam, self.lam)\n\n # store the initial version of both the policy and the log probs of the\n # actions for later comparison with the future versions (needed for PPO)\n policy_old = copy.deepcopy(self.policy)\n log_probs_old = policy_old(states).log_prob(actions)\n\n # sample from a batch of experiences\n # (\"_\" subscript indicates \"sampled from\")\n for (v_, A_, rtg_, log_probs_old_), i in utils.sample_batch((v, A, rtg, log_probs_old), self.batch_size, self.policy_updates):\n log_probs_ = self.policy(states).log_prob(actions)[i]\n\n # estimate ratio between the new log probs and the old ones\n r_ = torch.exp(log_probs_ - log_probs_old_)\n\n l_1 = r_ * A_\n l_2 = torch.clamp(r_, 1-self.eps, 1+self.eps) * A_\n\n # TODO: implement entropy\n # TODO: merge policy and critic\n\n # surragate loss function for PPO\n l_clip = -torch.mean(torch.min(l_1, l_2))\n\n # update the policy\n self.policy_optimizer.zero_grad()\n l_clip.backward(retain_graph=True)\n self.policy_optimizer.step()\n\n # sample a batch of value estimates and the corresponding rewards to go\n # to update the value function.\n for (v_, rtg_), _ in utils.sample_batch((v, rtg), self.batch_size, self.v_updates):\n # compute the loss\n critic_loss = F.mse_loss(v_, rtg_)\n\n # update the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward(retain_graph=True)\n self.critic_optimizer.step()\n\n # clear the memory. 
PPO is an On-Policy method so we don't need these\n # memories anymore\n self.memory.clear()\n\n # return the loss of the value function for display\n return F.mse_loss(v, rtg)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(position), action)] = val\n\n with open(\"penalty.txt\", \"a+\") as f:\n f.write(str(position) + \"|\" + action + \"|\" + str(val) + \"\\n\")", "def update_probs(self, measure, p, enemy_net = False):\n tmp_net = []\n net_size = len(self.net) \n if not enemy_net:\n net = self.net\n else:\n net = self.enemy_net\n #Maps a given color to its corresponding column in the color's \n #probability table.\n if measure == GREEN:\n color = 0\n elif measure == YELLOW:\n color = 1\n elif measure == ORANGE:\n color = 2\n elif measure == RED:\n color = 3\n #Obtains new probabilities by using the distance between the\n #observed position (the one measured) and any other position.\n for j in range(0, net_size):\n distance = self.__get_distance(p, j)\n if distance == 0: #When updating the measured position's probability.\n tmp_net.append(net[j].value * self.ct[0][color])\n elif distance == 1: #When updating an adjacent position to the one measured.\n tmp_net.append(net[j].value * self.ct[1][color])\n elif distance == 2: #When updating a position at two cells from the one measured.\n tmp_net.append(net[j].value * self.ct[2][color])\n elif distance == 3: #When updating a position at three cells from the one measured.\n tmp_net.append(net[j].value * self.ct[3][color])\n else: #When updating a position at four or more cells from the one measured.\n tmp_net.append(net[j].value * self.ct[4][color])\n #Obtains summation of new probabilities in order to execute \n #a posterior normalization.\n total = sum(tmp_net)\n #Normalizes new probabilities and assigns them to its \n #corresponding position.\n for i in range(0, net_size):\n net[i].value = tmp_net[i]/total", "def modify_rates(self):\n if self.modified:\n print 'Already Modified Probabilities'\n elif self.varGiven:\n print 'You must enter the conditional coalescent probabilties if you want to supply variance of'\n print 'the coalescent probabilities. Required since we cannot compute the variance of the conditionals'\n print 'given the variance of the marginals. 
Assuming that you gave the conditional probs.'\n else:\n testrates = self.obsRates.copy()\n tratesum = testrates.cumsum(1)\n nocoal = 1 - tratesum\n nocoal = nocoal[:, :-1]\n nocoal = np.hstack((np.ones((np.shape(nocoal)[0], 1)), nocoal))\n testrates = testrates.getA() / (nocoal.getA() + 1e-200)\n self.modified = True\n self.obsRates = np.matrix(np.max([np.min([testrates, np.ones(np.shape(testrates))], 0), np.zeros(np.shape(testrates))], 0))", "def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)", "def update_policy_values(policy_lookup, board, state, player, action, next_state, reward):\n\t\n\t# compute total expected reward including future rewards\n\tif board.check_end():\n\t\texpected = reward\n\telse:\n\t\tif player == 1:\n\t\t\texpected = reward + discount * min_value(policy_lookup, next_state, 2)\n\t\telif player == 2:\n\t\t\texpected = reward + discount * max_value(policy_lookup, next_state, 1)\n\t# get current policy action value\n\tpolicy_value = get_policy_value(policy_lookup, state, player, action)\n\t# update policy action value\n\tpolicy_lookup[(state, player)][action] += learning_rate * (expected - policy_value)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n best_action = self.computeActionFromQValues(nextState)\n self.QValueCounter[(state, action)] = ((1 - self.alpha) * self.getQValue(state, action) +\n self.alpha * (reward + self.discount * self.getQValue(nextState,\n best_action)))", "def updatePropensity(self, k, rewardType=SUBMISSION, commentCount = 0):\r\n reward = self.rewardFunctions[self.R](rewardType, commentCount)\r\n #print \"Reward: %s\" %reward\r\n #print \"Propensity before updating: %s\" %(self.propensity.weights[k])\r\n self.propensity.update(k, reward)\r\n #print \"Propensity after updating: %s\" %(self.propensity.weights[k])\r", "def update(self, reward):\n self.r_sum[self.last_action] += reward\n self.action_count[self.last_action] += 1\n\n n = np.sum(self.action_count, axis=0)\n \n # If a baseline is used, it should be the average reward obtained on all actions chosen\n # not the average on the last action chosen (np.sum(self.r_sum)/n instead of \n # self.r_sum[self.last_action]/n )\n reward_avg = np.sum(self.r_sum, axis=0)/n if self.baseline else 0 \n\n # Vectorization of update rules into a single one\n mask = np.zeros(len(self.H))\n 
mask[self.last_action] = 1 # mask is equivalent to 1x==a\n\n # We update preferences, we increase (or decrease) the last action taken based on the reward obtained\n # compared to the average reward obtained and modify the rest of the action in the opposite direction\n self.H += self.alpha*(reward - reward_avg)*(mask - self.last_pi)", "def update(self, obs, actions, rewards, new_obs):\n pass", "def update(self, state, action, nextState, reward):\n # print \"Update\"\n difference = (reward + self.discount*self.compValFromState(nextState)) - self.getQValue(state, action)\n features = self.featExtractor.getFeatures(state, self.index)\n #print \"features\", features, \"difference\", difference, \"weights\", self.weights\n for key in self.weights:\n self.weights[key] = self.alpha * difference * features[key]", "def update_step(self, replay_buffer_iter):\n\n transition = next(replay_buffer_iter)\n states = transition.observation[:, 0]\n actions = transition.action[:, 0]\n rewards = transition.reward[:, 0]\n next_states = transition.observation[:, 1]\n discounts = transition.discount[:, 0]\n\n next_actions, _ = self.actor(next_states, sample=True, with_log_probs=True)\n\n # entropy_rewards = self.discount * discounts * self.alpha * next_log_probs\n # rewards -= entropy_rewards\n critic_dict = self.fit_critic(states, actions, next_states, next_actions,\n rewards, discounts)\n actor_dict = self.fit_actor(states)\n\n return {**actor_dict, **critic_dict}", "def _update(self, results):\n logger = getLogger(\"problog_lfi\")\n fact_body = defaultdict(int)\n fact_par = defaultdict(int)\n\n score = 0.0\n for m, pEvidence, result in results:\n par_marg = dict()\n for fact, value in result.items():\n # use the id and the t variables as index\n index = (fact.args[0], fact.args[1])\n if fact.functor == \"lfi_body\":\n fact_body[index] += value * m\n elif fact.functor == \"lfi_par\":\n if index in par_marg:\n par_marg[index] += value\n for o_index in self._adatomc[index[0]]:\n par_marg[(o_index, *index[1:])] += value\n else:\n par_marg[index] = value\n for o_index in self._adatomc[index[0]]:\n par_marg[(o_index, *index[1:])] = value\n\n for index, value in par_marg.items():\n fact_par[index] += value * m\n try:\n score += math.log(pEvidence)\n except ValueError:\n logger.debug(\"Pr(evidence) == 0.0\")\n\n update_list = fact_body\n\n weight_changed = []\n for weight in self._weights:\n if isinstance(weight, float):\n weight_changed.append(False)\n elif isinstance(weight, dict):\n d = dict()\n for w in weight:\n d[w] = False\n weight_changed.append(d)\n\n score = 0.0\n for index in update_list:\n if float(fact_body[index]) <= 10**-15:\n # if close to zero\n prob = 0.0\n else:\n prob = float(fact_body[index]) / float(fact_par[index])\n try:\n score += math.log(prob)\n except ValueError as ex:\n # prob too close to zero\n pass\n\n logger.debug(\n \"Update probabilistic fact {}: {} / {} = {}\".format(\n index, fact_body[index], fact_par[index], prob\n )\n )\n self._set_weight(index[0], index[1], prob, weight_changed=weight_changed)\n if not index[1]:\n weight_changed[int(index[0])] = True\n elif (\n isinstance(weight_changed[int(index[0])], dict)\n and index[1] in weight_changed[int(index[0])]\n ):\n weight_changed[int(index[0])][index[1]] = True\n else:\n weight_changed[int(index[0])] = {index[1]: True}\n\n if self._enable_normalize:\n self._normalize_weights()\n\n return score", "def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n 
vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)", "def update(self, s_p, actions, reward):\n qValueCurrent = self.getQvalue(self.s, self.action)\n feature = self.getFeatures(self.s, self.action)\n qValue_p = self.valueFromQvalues(s_p, actions)\n # reward = self.rewards(s_p)\n diff = (reward + self.gamma*qValue_p)-qValueCurrent\n for k in feature.keys():\n self.weights[k] = self.weights[k] + self.alpha*diff*feature[k]", "def state_update(self, evidence: List, return_mode: str = 'letter'):\n sample = uniform(len(self.alp))\n pairs = list(zip(self.alp, sample.tolist()))\n priors = {return_mode: pairs}\n\n self.log.debug(\"Language Model Random probabilities:\")\n self.log.debug(priors)\n return priors", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(state), action)] = val", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(position), action)] = val\n\n with open(\"dataRealGoal.txt\", \"a+\") as f:\n f.write(str(position) + \"|\" + action + \"|\" + str(val) + \"\\n\")", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(position), action)] = val\n\n with open(\"dataFakeGoal1.txt\", \"a+\") as f:\n f.write(str(position) + \"|\" + action + \"|\" + str(val) + \"\\n\")", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n # print \"update\"\n oldValue = self.getQValue(state, action)\n sample = reward + self.discount*self.computeValueFromQValues(nextState)\n self.qValues[(state, action)] = (1-self.alpha)*oldValue + self.alpha*(sample)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(position), action)] = val\n\n with open(\"dataFakeGoal.txt\", \"a+\") as f:\n f.write(str(position) + \"|\" + action + \"|\" + str(val) + \"\\n\")", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, 
action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def update(self, action, reward):\n # like puseudo count\n a, b = self.ActionValue[action]\n #print(f\"UPDATE {action}: ({a}, {b})\")\n a = a + self.huber(reward) # The larger the reward, the easier it is to select\n b = b + 1 - self.huber(reward) # It becomes easy to be selected as the reward becomes larger, and it becomes difficult to be selected as the reward becomes smaller\n a = 0.001 if a <= 0 else a\n b = 0.001 if b <= 0 else b\n \n self.ActionValue[action] = (a, b)\n\n #print(f\"=> ({a}, {b})\")\n\n # Update nearby action candidates\n around_update_rate = 0.3 # Parameter to adjust the degree of change according to the distance; [0, 1]\n radius = np.sqrt(self.action_resolution**2 + self.action_resolution**2 + 1e-9) # 1e-9 is for safety to caluculate the small number \n for action_around in self.actions:\n if action_around == action:\n continue\n x = action_around[0] - action[0]\n y = action_around[1] - action[1]\n distance = np.sqrt(x**2 + y**2)\n if distance <= radius:\n a, b = self.ActionValue[action_around]\n #print(f\"UPDATE {action_around}: ({a}, {b})\")\n a = a + self.huber(reward) * around_update_rate * (1 - distance)\n b = b + (1 - self.huber(reward)) * around_update_rate * (1 - distance) # To adjust the update, weight 1-r. If normal update is 1, it will be the update of around_update_rate * (1-distance) for adjacent actions.\n a = 0.001 if a <= 0 else a\n b = 0.001 if b <= 0 else b\n\n #print(f\"=> ({a}, {b})\")\n\n self.ActionValue[action_around] = (a, b)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state,action)\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state,action pair\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n weight = self.weights\n\n Q_Value = 0\n\n difference = (reward + discount_factor * nextState_QValue ) - (temporary_QValue) #refer to README_Reinforcement.txt for the formula\n\n for each_feature in features:\n\n #refer to README_Reinforcement.txt for the formula at line 20\n weight[each_feature] = weight[each_feature] + learning_rate * difference * features[each_feature]\n\n #util.raiseNotDefined()", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]\n\n #if self.epsilon > self.epsilon_min:\n # self.epsilon *= self.epsilon_decay", "def assignProbablities(self, gameState):\n legalActions = gameState.getLegalActions()\n numDiceActive = 
sum(gameState.numDicePerPlayer)\n probActionTuples = []\n\n for action in legalActions:\n currentHand = gameState.hands[self.agentIndex]\n currentAction = action\n remainingTotalDice = gameState.totalNumDice - gameState.numDicePerPlayer[self.agentIndex]\n assert remainingTotalDice > 0\n remainingActionCount = currentAction[2] - currentHand[currentAction[1]]\n if remainingActionCount > remainingTotalDice:\n if action[0] == 'deny':\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n elif remainingActionCount > 0:\n # or (action[0] == \"confirm\" and remainingActionCount == 0)\n if action[0] == \"bid\":\n probActionTuples.append((self.bidProbability(remainingTotalDice, remainingActionCount), action))\n elif action[0] == \"deny\":\n probActionTuples.append((1 - self.bidProbability(remainingTotalDice, remainingActionCount), action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n elif remainingActionCount == 0:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n elif action[0] == \"deny\":\n probActionTuples.append((0, action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n else:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n\n return probActionTuples", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n self.qValues[(state, action)] = ((1 - self.alpha) * self.getQValue(state, action)) + self.alpha \\\n * (reward + self.discount * self.computeValueFromQValues(nextState))", "def update_score():\n pass", "def update(self, state, action, nextState, reward):\n self.qvals[(state, action)] = self.qvals[(state, action)] + self.alpha * (\n reward + self.discount * self.computevaluefromqvalues(nextState) - self.qvals[(state, action)])", "def predict_action_probabilities(self, tracker, domain):\n # type: (DialogueStateTracker, Domain) -> (float, Optional[int])\n x = self.featurize(tracker, domain)\n logger.debug('Current tracker state [\\n\\t{}]'.format(\n \"\\n\\t\".join([\"{}\".format(e) for e in self.featurizer.decode_features(x, domain.input_features)])))\n\n memorised = self.recall(x, domain)\n result = [0.0] * domain.num_actions\n if memorised is not None and self.is_enabled:\n logger.debug(\"Used memorised next action '{}'\".format(memorised))\n result[memorised] = 1.0\n return result", "def update_critic_weights(self, states, actions, new_states, dones, rewards):\n with tf.GradientTape(True) as tape:\n noise = (\n tf.random.normal(\n (self.buffers[0].batch_size * self.n_envs, self.n_actions)\n )\n * self.policy_noise_coef\n )\n noise = tf.clip_by_value(noise, -self.noise_clip, self.noise_clip)\n new_actions = tf.clip_by_value(\n self.target_actor(new_states) + noise, -1.0, 1.0\n )\n target_critic_input = tf.concat(\n [tf.cast(new_states, tf.float64), tf.cast(new_actions, tf.float64)], 1\n )\n target_value1 = self.target_critic1(target_critic_input)\n target_value2 = self.target_critic2(target_critic_input)\n target_value = tf.minimum(target_value1, target_value2)\n target_value = rewards + tf.stop_gradient(\n (1 - dones) * self.gamma * target_value\n )\n critic_input = tf.concat([states, actions], 1)\n value1 = self.critic1(critic_input)\n value2 = self.critic2(critic_input)\n critic1_loss, critic2_loss = MSE(value1, target_value), MSE(\n value2, target_value\n )\n self.critic1.optimizer.minimize(\n critic1_loss, 
self.critic1.trainable_variables, tape=tape\n )\n self.critic2.optimizer.minimize(\n critic2_loss, self.critic2.trainable_variables, tape=tape\n )", "def _update_suspicion_1(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def mutate1(self, probability):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = np.random.normal(0, 1)", "def act(self, observation):\n self.t += 1\n\n probabilities = self.probabilities(observation)\n\n probabilities *= self.action_mask(observation[0])\n\n if probabilities.sum() == 0.0:\n probabilities += 1.0\n\n return torch.multinomial(probabilities, num_samples=1)[0]", "def update_reward(state, reward, max_reward, alpha=1, c=100, gamma=0.9):\n\t\t# update number of actions done so far to this state\n\t\tactions[state] = actions.get(state, 0.0) + 1.0\n\t\t# compute learning rate\n\t\talpha *= c / (c + actions[state])\n\t\trewards[state] = rewards.get(state, 0.0) + alpha*(reward+gamma*max_reward-rewards.get(state, 0.0))", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n \n pass", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n \n pass", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n \n pass", "def initialize_probabilities(self):\n self.probabilities = np.ndarray((2, len(self.variables)), dtype=np.float)\n self.update_probabilities()", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities): self.tree.val_update(i, float(p**self.alpha))", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n 
self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self, state, action, nextState, reward):\n candidateQ = reward + self.discount * \\\n self.computeValueFromQValues(nextState)\n currentQ = self.getQValue(state, action)\n difference = candidateQ - currentQ\n features = self.featExtractor.getFeatures(state, action)\n for feat in features:\n self.weights[feat] += self.alpha * difference * features[feat]", "def __init__(self, mdp, discount = 0.9, iterations = 100):\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n\n # Repeat for given numbe of iterations\n for _ in range(self.iterations):\n\n\n values2 = self.values.copy()\n # Iterate over all States\n for s in self.mdp.getStates():\n\n # If Terminal, update utility of this state to be the reward of this terminal\n if self.mdp.isTerminal(s):\n values2[s] = self.mdp.getReward(s,None,None)\n\n else:\n\n # Determine Update for each State by iterating over possible Actions\n m = float(\"-inf\")\n for a in self.mdp.getPossibleActions(s):\n\n # Calculate Expected Utility of taking this Action\n x = 0\n for t in self.mdp.getTransitionStatesAndProbs(s,a):\n x += t[1]*self.getValue(t[0])\n\n # Keep track of the maximum expected utility of each of the Actions\n m = max(m,x)\n\n # Calculate/Save Updated Utilities\n update = self.mdp.getReward(s,None,None) + self.discount*m\n values2[s] = update\n\n # Now Update Utilities\n for k in values2.keys():\n self.values[k] = values2[k]", "def update(self, state, action, nextState, reward):\n oldComponent = (1-self.alpha) * self.getQValue(state, action)\n nextValue = self.computeValueFromQValues(nextState)\n sample = reward + self.discount * nextValue\n newComponent = self.alpha * sample\n self.qValues[(state, action)] = oldComponent + newComponent", "def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]", "def reward(self, observation, action, reward):\n self.iter[action] += 1\n self.expvalue[action] += 1/self.iter[action]*(reward - self.expvalue[action])", "def update_probs(flip,prob_coins,coins):\n if flip == 'H':\n joint_prob_sum = 0\n for x in range(len(prob_coins)):\n joint_prob_sum += (prob_coins[x] * coins[x])\n new_prob_coins = []\n for x in range(len(prob_coins)):\n new_prob_coin = prob_coins[x] * coins[x] / joint_prob_sum\n new_prob_coins.append(new_prob_coin)\n return new_prob_coins\n else:\n joint_prob_sum = 0\n for x in range(len(prob_coins)):\n joint_prob_sum += (prob_coins[x] * (1-coins[x]))\n new_prob_coins = []\n for x in range(len(prob_coins)):\n new_prob_coin = (prob_coins[x] * (1-coins[x])) / joint_prob_sum\n new_prob_coins.append(new_prob_coin)\n return new_prob_coins", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use Q-Learning algoritm in slide 58 of MDP\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n maxQns = self.getValue(nextState) # get max q-value of next state\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action) #self.qValues[(state, action)]\n difference = reward + self.discountRate * maxQns - Qsa\n self.qValues[(state, action)] += self.alpha * difference\n \n self.vitCount[(state, action)] += 1\n \"\"\" END CODE \"\"\"", "def _populate_probabilities(self):\n\n for state in range(self.n_states):\n 
x, y = index_to_position(state, self.columns)\n\n if state == self.absorbing_state or self.world[x, y] in ('£', '$'):\n self._p[state, self.absorbing_state, :] = 1\n continue\n\n for action in range(self.n_actions):\n next_state = state\n next_x, next_y = x + self.actions[action][0], y + self.actions[action][1]\n if 0 <= next_x < self.rows and 0 <= next_y < self.columns and self.world[next_x, next_y] != '#' and \\\n self.world[x, y] != '#':\n next_state = position_to_index(next_x, next_y, self.columns)\n\n self._p[state, next_state, action] = 1", "def update_frequencies():\n pass", "def _update_suspicion_1(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def update_q(self,action,reward):\n #print('')\n #print('Action index is: ' + str(action))\n #print('Provided reward is: ' + str(reward))\n \n # Read from disk before updating\n try:\n pickle_in = open(\"static/data/values.pickle\",\"rb\")\n values = pickle.load(pickle_in)\n #print(values)\n self.values = values\n pickle_in = open(\"static/data/counts.pickle\",\"rb\")\n self.counts = pickle.load(pickle_in)\n pickle_in = open(\"static/data/actions_taken.pickle\",\"rb\")\n actions_taken = pickle.load(pickle_in)\n pickle_in = open(\"static/data/reward_list.pickle\",\"rb\")\n reward_list = pickle.load(pickle_in)\n except:\n actions_taken = []\n reward_list = []\n pass\n \n self.counts[action] += 1\n n = self.counts[action]\n value = self.values[action]\n actions_taken.append(action)\n reward_list.append(reward)\n \n # Running product\n new_value = value + (1/n) * (reward - value)\n self.values[action] = new_value\n \n \n # Save to disk before exiting\n pickle_out = open('static/data/values.pickle','wb')\n pickle.dump(self.values, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/counts.pickle','wb')\n pickle.dump(self.counts, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/actions_taken.pickle','wb')\n pickle.dump(actions_taken, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/reward_list.pickle','wb')\n pickle.dump(reward_list, pickle_out)\n pickle_out.close()", "def updateWeights(self, message):\n prefWeights = [self.prefWghts1.GetValue(), \n self.prefWghts2.GetValue(), \n self.prefWghts3.GetValue()]\n\n self.model.setWeights(prefWeights, \n self.Prefs.GetValue(), \n self.ExcessCap.GetValue(), \n self.CongPenalty.GetValue(), \n self.DeptFairness.GetValue(), \n self.Back2Back.GetValue())", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def update(steps, states, actions, rewards):\n G = self._Q[states[steps[0]]][actions[steps[0]]]\n for k in steps:\n if done:\n G = rewards[k - 1]\n else:\n s_k, a_k = states[k], actions[k]\n V = sum([\n self.epsilon_greedy_policy(s_k, a) * self._Q[s_k][a]\n for a in range(self.env.nA)\n ])\n G = (\n rewards[k - 1]\n + discount\n * (self.sigma(k) + (1 - self.sigma(k))\n * self.epsilon_greedy_policy(s_k, a_k))\n * (G - self._Q[s_k][a_k])\n + discount * V\n )\n tau = steps[-1] - 1\n self._Q[states[tau]][actions[tau]] += (\n alpha * (G - self._Q[states[tau]][actions[tau]])\n )", "def 
UpdateNode(self, result, actions):\n self.visits += 1\n self.wins += (result > 0)\n self.losses += (result < 0)\n self.draws += (result == 0)\n self.Q = self.Q + (result - self.Q)/self.visits\n \n # update rave values\n for a in actions:\n self.N_AMAF[a] += 1\n if not a in self.Q_AMAF:\n self.Q_AMAF[a] = 0.5\n self.Q_AMAF[a] = self.Q_AMAF[a] + (result - self.Q_AMAF[a])/self.N_AMAF[a]\n else:\n self.Q_AMAF[a] = self.Q_AMAF[a] + (result - self.Q_AMAF[a])/self.N_AMAF[a]", "def _update_episode(self):\n self._publish_reward_topic(\n self.cumulated_episode_reward,\n self.episode_num\n )\n self.episode_num += 1\n self.cumulated_episode_reward = 0", "def update_sample_reward(self, r):\n self.r_exp[-1] += r * self.gamma ** (len(self.u_seq[-1])-1)", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n self.minmax = max(self.values) - min(self.values)\n \n \n pass", "def update(self, obs, actions, rewards, new_obs):\n a0, a1 = actions\n r0, _ = rewards\n\n self.Dir[a1] += 1 # Update beliefs about adversary\n\n aux = np.max( np.dot( self.Q[new_obs], self.Dir/np.sum(self.Dir) ) )\n self.Q[obs, a0, a1] = (1 - self.alpha)*self.Q[obs, a0, a1] + self.alpha*(r0 + self.gamma*aux)", "def update_counts(self, new_alpha, new_beta, decay):\n\n self._alpha = self._alpha / decay + new_alpha\n self._beta = self._beta / decay + new_beta\n self._n_updates += 1", "def update(self, obs, actions, rewards, new_obs):\n a0, a1 = actions\n r0, _ = rewards\n\n if obs[0] == None:\n unif = np.ones(len(self.action_space))\n #aux = np.max( np.dot( self.Q[new_obs[0],new_obs[1],:,:], unif/np.sum(unif) ) )\n else:\n self.Dir[obs[0],obs[1],a1] += 1 # Update beliefs about adversary\n #aux = np.max( np.dot( self.Q[new_obs[0],new_obs[1],:,:],\n # self.Dir[new_obs[0],new_obs[1],:]/np.sum(self.Dir[new_obs[0],new_obs[1],:]) ) )\n\n #self.Q[obs[0], obs[1], a0, a1] = ( (1 - self.alpha)*self.Q[obs[0], obs[1], a0, a1] +\n # self.alpha*(r0 + self.gamma*aux) )", "def update(self, sample, oppo_target_policy, oppo_policy, parallel=False, logger=None,iter=5):\n obs, acs, rews, next_obs, dones = sample\n\n self.critic_optimizer.zero_grad()\n # if self.alg_types[agent_i] == 'MADDPG':\n if self.discrete_action: # one-hot encode action\n if self.agent_i ==0:\n all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n zip([oppo_target_policy,self.target_policy], next_obs)]\n # all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n # zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n if self.agent_i ==0:\n all_trgt_acs = [pi(nobs) for pi, nobs in\n zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n all_trgt_acs = [pi(nobs) for pi, nobs in\n zip([oppo_target_policy,self.target_policy], next_obs)]\n # all_trgt_acs = [pi(nobs) for pi, nobs in zip(self.target_policy,\n # next_obs)]\n trgt_vf_in = torch.cat((*next_obs, *all_trgt_acs), dim=1)\n\n if self.discrete_action:\n target_value = (rews[self.agent_i].view(-1, 1) + self.gamma *\n self.target_critic(trgt_vf_in) *\n (1 - dones[self.agent_i].view(-1, 1))) #change after\n else:\n target_value = (rews[self.agent_i].view(-1, 1) + self.gamma *self.target_critic(trgt_vf_in)*(dones.view(-1, 1)))\n\n vf_in = torch.cat((*obs, *acs), dim=1)\n 
actual_value = self.critic(vf_in)\n vf_loss = MSELoss(actual_value, target_value.detach())\n vf_loss.backward()\n\n torch.nn.utils.clip_grad_norm(self.critic.parameters(), 0.5)\n self.critic_optimizer.step()\n\n self.policy_optimizer.zero_grad()\n\n if self.discrete_action:\n curr_pol_out = self.policy(obs[self.agent_i])\n curr_pol_vf_in = gumbel_softmax(curr_pol_out, hard=True)\n else:\n curr_pol_out = self.policy(obs[self.agent_i])\n curr_pol_vf_in = curr_pol_out\n\n all_pol_acs = []\n if self.discrete_action:\n if self.agent_i == 0:\n all_pol_acs.append(curr_pol_vf_in)\n all_pol_acs.append(onehot_from_logits(oppo_policy(obs[1])))\n else:\n all_pol_acs.append(onehot_from_logits(oppo_policy(obs[0])))\n all_pol_acs.append(curr_pol_vf_in)\n else:\n if self.agent_i == 0:\n all_pol_acs.append(curr_pol_vf_in)\n all_pol_acs.append(oppo_policy(obs[1]))\n else:\n all_pol_acs.append(oppo_policy(obs[0]))\n all_pol_acs.append(curr_pol_vf_in)\n\n #\n # for i, ob in zip(range(self.nagents), obs):\n # if i == self.agent_i-1:\n # all_pol_acs.append(curr_pol_vf_in)\n # elif self.discrete_action:\n # all_pol_acs.append(onehot_from_logits(self.policy(ob)))\n # else:\n # all_pol_acs.append(self.policy(ob))\n\n vf_in = torch.cat((*obs, *all_pol_acs), dim=1)\n\n pol_loss = -self.critic(vf_in).mean()\n pol_loss += (curr_pol_out**2).mean() * 1e-3\n pol_loss.backward()\n total_norm=0\n for p in self.policy.parameters():\n param_norm = p.grad.data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm ** (1. / 2)\n torch.nn.utils.clip_grad_norm(self.policy.parameters(), 0.5)\n self.policy_optimizer.step()", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p**self.alpha)", "def assign_percent_real(session, percent_real_update, new_rate, current_rate):\n session.run(percent_real_update, feed_dict={new_rate: current_rate})", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def _update_suspicion_0(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def update(self, reward):\n raise NotImplementedError", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def get_probs(self, states, actions):\n # YOUR 
CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def update(self, obs, actions, rewards, new_obs):\n a0, a1 = actions\n r0, _ = rewards\n\n if obs[0] == None:\n unif = np.ones(len(self.action_space))\n aux = np.max( np.dot( self.Q[new_obs[0],new_obs[1],:,:], unif/np.sum(unif) ) )\n else:\n self.Dir[obs[0],obs[1],a1] += 1 # Update beliefs about adversary\n aux = np.max( np.dot( self.Q[new_obs[0],new_obs[1],:,:],\n self.Dir[new_obs[0],new_obs[1],:]/np.sum(self.Dir[new_obs[0],new_obs[1],:]) ) )\n\n self.Q[obs[0], obs[1], a0, a1] = ( (1 - self.alpha)*self.Q[obs[0], obs[1], a0, a1] +\n self.alpha*(r0 + self.gamma*aux) )", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q #calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def responseProb(obs, dt, n1, n2, pc, scaling, prevInternalState, reward, costM, costS,\n pRes): \n #0 is default, 1 is cue\n respond = 2; internalState = np.nan; payofftoA = 0; payofftoD = 0\n p = np.full((len(obs)+1,2), np.nan) #array of posterior prob for default, cue\n fs = np.full((len(obs)+1,2), np.nan) #array of scaled f values for default, cue\n \n transition1 = np.array([[1, 0],[0,1]]) #transition probabilities in general\n e = np.array([[n1,1-n1],[1-n2,n2]]) #emission probabilities\n foreperiodSteps = int((6/dt)+1)\n \n \n fs[0,:] = np.array([1,0])\n p[0,:] = fs[0,:]/np.sum(fs[0,:])\n \n #inference process \n for i in range(len(obs)):\n if i < foreperiodSteps:\n r = 1/(foreperiodSteps-i)\n #print(r, i, sep= ' ')\n transition2 = np.array([[1-pc*r,pc*r],[0,1]])\n #transition probability in foreperiod, before transition\n fs[i+1, :] = scaling*e[:,int(obs[i])]*np.matmul(fs[i,:], transition2)\n #calculaitng joint probabilities\n else:\n fs[i+1, :] = scaling*e[:,int(obs[i])]*np.matmul(fs[i,:], transition1)\n #calculaitng joint probabilities\n \n p[i+1, :] = fs[i+1,:]/np.sum(fs[i+1,:]) #posterior probabilites\n \n #response process\n \n #calculating payoffs\n if prevInternalState == 'default' :\n payofftoA = p[len(obs),1]*pRes[1,1]*reward + p[len(obs),0]*pRes[0,1]*reward - costS\n payofftoD = p[len(obs),0]*pRes[0,0]*reward + p[len(obs),1]*pRes[1,0]*reward\n elif prevInternalState == 'active' :\n payofftoA = p[len(obs),1]*pRes[1,1]*reward + p[len(obs),0]*pRes[0,1]*reward - costM\n payofftoD = p[len(obs),0]*pRes[0,0]*reward + p[len(obs),1]*pRes[1,0]*reward\n \n \n #deciding internal state based on payoffs\n if payofftoA > payofftoD :\n internalState = 'active'\n k = np.random.binomial(1,pRes[1,1]) #probabilistic response in A\n if k == 1:\n respond = 1\n elif k == 0:\n respond = 0\n \n elif payofftoA < payofftoD :\n internalState = 'default'\n k = np.random.binomial(1,pRes[0,0]) #probabilistic response in D\n if k == 1:\n respond = 0\n elif k == 0:\n respond = 1\n \n \n return respond, internalState, p", "def update(self, preds: Tensor, targets: Tensor) -> None:\n 
sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, self.power)\n\n self.sum_deviance_score += sum_deviance_score\n self.num_observations += num_observations", "def update_reward(self, observation, action, reward):\n self.q_table[observation,action] = (1 - self.count(observation,action)) * self.q_table[observation,action] + self.count(observation,action) * reward # Canonical Q-update\n self.increment_count(observation,action)", "def probabilities(self):\n raise NotImplementedError", "def p(self) -> Probability:\n ...", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def update(self, observations: Observations, action: CARLAAction,\n reward: float, new_observations: Observations, *args: Any,\n **kwargs: Any) -> None:\n if new_observations[\"collision\"] > 0:\n self.value += 1", "def _update_suspicion_0(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def update_recruiting(self, rate):\n self.recruit = int(np.ceil(self.INITIAL_POPULATION*rate))", "def updateProb(chromosome, sum):\n prob = chromosome[2]/(sum)\n chromosome.append(prob)\n return prob", "def __init__(self, mdp, discount = 0.9, iterations = 100):\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n\n # Write value iteration code here\n \"*** YOUR CODE HERE ***\"\n #states = mdp.getStates()\n #values = {state: 0 for state in states}\n for i in range(iterations):\n previous = self.values.copy()\n for state in mdp.getStates():\n possibleActions = 
mdp.getPossibleActions(state)\n if len(possibleActions) == 0: continue\n results = []\n for action in possibleActions:\n total = 0\n for (nextState, prob) in mdp.getTransitionStatesAndProbs(state,action):\n total += (prob * previous[nextState])\n results.append(total)\n self.values[state] = mdp.getReward(state) + (discount * max(results))", "def update(self, returns, log_probs):\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, returns):\n policy_gradient.append(-log_prob * Gt)\n\n loss = torch.stack(policy_gradient).sum()\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def refresh(self):\n self.reward = 0\n self.score = 0", "def update(self, probs: torch.Tensor):\n tree, capacity = self._create_tree(probs, self.tree)\n self.tree = tree\n self.capacity = capacity", "def _update_distribution(self, trajectories):\n costs = trajectories[\"costs\"].copy()\n actions = trajectories[\"actions\"].copy()\n Q = cost_to_go(costs, self.gamma_seq)\n best_id = np.argmin(Q, axis = 0)[0]\n self.mean_action = (1.0 - self.step_size) * self.mean_action +\\\n self.step_size * actions[best_id]" ]
[ "0.7125513", "0.7022864", "0.6515388", "0.64963096", "0.64798397", "0.64248484", "0.64184403", "0.63899285", "0.6279894", "0.6097903", "0.6097903", "0.6088608", "0.6073833", "0.6068825", "0.60576266", "0.6045901", "0.6018484", "0.60050225", "0.5990919", "0.5990202", "0.5989244", "0.5974224", "0.59716576", "0.5959715", "0.59595513", "0.5953228", "0.5944343", "0.593527", "0.5922228", "0.5902062", "0.58818173", "0.5828941", "0.5813957", "0.58078384", "0.57998765", "0.57877964", "0.578148", "0.57791954", "0.57682997", "0.5766189", "0.5762089", "0.5761819", "0.5761819", "0.5761819", "0.57582986", "0.5758245", "0.57437015", "0.57437015", "0.57437015", "0.57437015", "0.57426333", "0.57301205", "0.57291627", "0.5727726", "0.5725537", "0.57235205", "0.57216936", "0.5708444", "0.57045615", "0.5692656", "0.568829", "0.5686114", "0.56769115", "0.5668141", "0.56535846", "0.5653463", "0.56529397", "0.56466305", "0.56463575", "0.5645889", "0.5639334", "0.56392103", "0.5618401", "0.5613154", "0.5611698", "0.5609983", "0.56093913", "0.5590634", "0.55901295", "0.55901295", "0.5589518", "0.5577253", "0.55633825", "0.5560732", "0.55589867", "0.5552343", "0.5548332", "0.5540909", "0.5540761", "0.5529514", "0.5526446", "0.5518469", "0.55178326", "0.5516409", "0.55123544", "0.55104256", "0.55052584", "0.5504536", "0.5495208", "0.54913896" ]
0.6925041
2
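Most of the negatives scored in the row above implement variants of the same tabular temporal-difference rule, Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (reward + gamma * max_a' Q(s',a')). As a reference point while reading those snippets, here is a minimal self-contained sketch of that update; the QLearner class and its field names are illustrative and do not come from any snippet above.

from collections import defaultdict

class QLearner:
    """Minimal tabular Q-learning update (illustrative sketch)."""

    def __init__(self, actions, alpha=0.1, discount=0.9):
        self.actions = actions        # list of legal actions
        self.alpha = alpha            # learning rate
        self.discount = discount      # gamma
        self.q = defaultdict(float)   # (state, action) -> estimated value

    def value(self, state):
        # max over a' of Q(s', a'); 0.0 when the state has no entries yet
        return max((self.q[(state, a)] for a in self.actions), default=0.0)

    def update(self, state, action, next_state, reward):
        # Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (r + gamma * max_a' Q(s',a'))
        sample = reward + self.discount * self.value(next_state)
        self.q[(state, action)] = ((1 - self.alpha) * self.q[(state, action)]
                                   + self.alpha * sample)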
Takes a big limit as an integer and gets all the prime numbers up to and including that limit. Returns a numpy array of the primes. Fragmentation is an int that multiplies the sqrt of the limit to increase the fragment size: bigger fragmentation consumes more memory but less time. The maximum useful fragmentation is sqrt(limit). With 4 GB of RAM there is not enough memory for limit == 10**9; fragmentation == 1000 works.
def get_primes_in_big_limit(limit, fragmentation=1):
    # Requires numpy as np plus the helpers get_primes_in and
    # get_primes_in_fragment (both shown among the negatives below).
    print("Getting primes...")
    print("Fragmentation set to", fragmentation)
    # Base fragment size: every composite <= limit has a prime factor
    # <= sqrt(limit), so the primes of the first fragment suffice to
    # sieve all later fragments.
    fragment_limit = int(np.sqrt(limit))
    fragment_lowest = 0
    fragment_highest = fragment_lowest + fragment_limit
    primes_in_limit = np.array([], dtype=int)
    while fragment_highest < limit:
        if fragment_lowest == 0:
            # First fragment: sieve [0, sqrt(limit)] directly and keep
            # its primes for sieving every subsequent fragment.
            fragment_highest += 1
            primes_in_first_fragment = get_primes_in(fragment_highest)
            primes_in_limit = np.concatenate([primes_in_limit,
                                              primes_in_first_fragment],
                                             axis=None)
        else:
            primes_in_fragment = get_primes_in_fragment(fragment_lowest,
                                                        fragment_highest,
                                                        primes_in_first_fragment)
            primes_in_limit = np.concatenate([primes_in_limit,
                                              primes_in_fragment],
                                             axis=None)
        fragment_lowest = fragment_highest
        # fragmentation scales the step: larger fragments mean fewer
        # passes but more memory per pass.
        fragment_highest += (fragment_limit * fragmentation)
    # Sieve the remaining tail, inclusive of the limit itself.
    primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,
                                                     limit + 1,
                                                     primes_in_first_fragment)
    return np.concatenate([primes_in_limit, primes_in_last_fragment],
                          axis=None)
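A minimal usage sketch for the document above, assuming get_primes_in_big_limit and its helpers (get_primes_in and get_primes_in_fragment, shown among the negatives below) are importable; the expected count matches the sieve test among the negatives, which asserts 78498 primes below 1000000.

import numpy as np  # the sieve helpers operate on numpy arrays

primes = get_primes_in_big_limit(10**6, fragmentation=10)
print(len(primes))   # 78498 primes up to 10**6 (matches the sieve test)
print(primes[-1])    # 999983, the largest prime <= 10**6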
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]", "def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)", "def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)", "def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes", "def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes", "def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]", "def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes", "def get_primes_in_fragment(fragment_lowest, fragment_highest,\n primes_in_first_fragment):\n fragment_range = np.arange(fragment_lowest, fragment_highest)\n prime_mask = np.ones(len(fragment_range), dtype=bool)\n for p in primes_in_first_fragment:\n if fragment_lowest % p == 0:\n first_multiple = fragment_lowest // p\n else:\n first_multiple = fragment_lowest // p + 1\n first_multiple_index = first_multiple * p - fragment_lowest\n prime_mask[first_multiple_index::p] = False\n return fragment_range[prime_mask]", "def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1", "def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i 
in range(2, lim + 1) if s[i]]", "def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes", "def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]", "def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes", "def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set", "def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]", "def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]", "def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p", "def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def get_prime_array(number_of_primes) -> array:\n p = array('i')\n with open(f'prime{number_of_primes}.bin', 'rb') as prime_file:\n p.fromfile(prime_file, number_of_primes) \n return p", "def find_truncatable_primes(limit: int, start_from: int):\n\n start_time = time.time()\n truncatable = set()\n next_prime = primes_generator_iterable(start_from)\n\n while len(truncatable) < limit:\n\n prime = next(next_prime)\n if is_truncatable(prime):\n 
truncatable.add(prime)\n\n result = sum(truncatable)\n print_time_log(start_time, result)\n return result", "def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n", "def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]", "def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])", "def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return 
p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]", "def primes(n):\n sieve = bytearray([True]) * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)\n return [2,*compress(range(3,n,2), sieve[1:])]", "def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)", "def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)", "def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes", "def linear_sieve(max_n):\n smallest_factors = [0] * max_n\n primes = []\n\n for i in range(2, max_n):\n if smallest_factors[i] == 0:\n smallest_factors[i] = i\n primes.append(i)\n\n for p in primes:\n if p > smallest_factors[i] or i * p >= max_n:\n break\n smallest_factors[i * p] = p\n return primes, smallest_factors", "def get_primes(maxi):\n\n is_prime = [True] * (maxi + 1)\n \n is_prime[0] = False\n is_prime[1] = False\n # is_prime[2] = True and all other even numbers are not prime\n for i in range(2,maxi+1):\n if is_prime[i]: # if current is prime, set multiples to current not prime\n for j in range(2*i, maxi+1, i):\n is_prime[j] = False\n\n return is_prime", "def prime_number_2d_array(self):\n\n prime_list = utility_obj.get_prime()\n row = 10\n column = 25\n limit = 100\n\n two_d_array = [[0 for j in range(column)] for i in range(row)]\n\n k = 0\n for i in range(row):\n\n for j in range(column):\n\n if k < len(prime_list):\n if prime_list[k] <= limit:\n two_d_array[i][j] = prime_list[k]\n k += 1\n\n limit += 100\n\n for i in range(row):\n\n for j in range(column):\n\n if two_d_array[i][j] != 0:\n print(two_d_array[i][j], end=\" \")\n\n print()", "def prime_numbers(max_number_eval=100):\n prime_numbers_list = list(next_prime(max_number_eval))\n print('The prime numbers from 2 to {} are:{}'.format(max_number_eval, prime_numbers_list))", "def num_array(lower_limit = 0, upper_limit = 5, increment = 1):\n numbers = []\n while lower_limit < upper_limit:\n numbers.append(lower_limit)\n lower_limit += increment\n return numbers", "def generate_primes(L):\n # We need to compute the Bound of the factor set.\n i = 0\n list_p = []\n for p in prime_sieve():\n i += 1\n list_p.append(p)\n if i >= L:\n break\n return list_p", "def prime_numpy_version(n: int) -> List[int]:\n arm = range(2, np.floor(n / 2).astype(int) + 1)\n x, y = np.meshgrid(*([arm] * 2))\n\n Z = range(2, n + 1)\n D = x * y\n Diff = np.setdiff1d\n\n P = Diff(Z, D[D <= n].ravel())\n return P.tolist()", "def sieve(self, upto_num):\n max_cur_known = self.max_known_number()\n \n num_new = upto_num - max_cur_known\n #All new numbers are primes until they are crossed off\n self.number_list.extend(array.array('b', [1])*num_new)\n \n for marker_num in range(2, maths.floor(maths.sqrt(upto_num)) + 1):\n #For efficiency only use prime marked numbers\n if not 
self.is_prime(marker_num):\n continue\n \n min_x = max(max_cur_known // marker_num + 1, marker_num)\n max_x = upto_num // marker_num\n \n for x in range(min_x, max_x + 1):\n self.number_list[marker_num*x] = 0 # Non-prime", "def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])", "def get_primes(n):\n\n return list(primes_sieve(n))", "def solution(a_limit: int = 1000, b_limit: int = 1000) -> int:\n longest = [0, 0, 0] # length, a, b\n for a in range((a_limit * -1) + 1, a_limit):\n for b in range(2, b_limit):\n if is_prime(b):\n count = 0\n n = 0\n while is_prime((n ** 2) + (a * n) + b):\n count += 1\n n += 1\n if count > longest[0]:\n longest = [count, a, b]\n ans = longest[1] * longest[2]\n return ans", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def primi(n):\n numVec = []\n for x in range(n-1):\n numVec.append(x+2)\n for num in numVec[:(n//2-1)]:\n if numVec[num-2] != 0:\n numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)\n numVec = [x for x in numVec if x!=0]\n return numVec", "def sieve(n):\n\n primes = []\n sieve = [0] * n\n\n for i in range(2, n):\n if sieve[i] == 0:\n primes.append(i)\n sieve[i*i:n:i] = [1] * slice_length(i*i, n, i)\n\n return primes", "def primesfrom2to(n):\n sieve = numpy.ones(n//3 + (n%6 == 2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def getListOfPrimes(k = 40, n = 1000000):\n\n low = 2 ** (k - 1) # smallest number k bits could be\n lim = min(int(math.sqrt(low)), n + 1) # we don't want to generate any primes larger than n\n\n numList = [True] * lim # initialise boolean list\n primes = [] # initialise list of primes\n\n for i in range(2, lim): # loop through list from index 2\n if numList[i]: # if it is True\n primes.append(i) # must be prime\n\n for j in range(i*i, lim, i): # loop through multiples\n numList[j] = False # setting them to false\n\n return primes # return ptimes", "def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def mult_parities_python(bound, verbose=False):\n v = [None] * bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound, 2)) + 1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity + int(1)) % int(2)\n if verbose:\n print(\"loop {0} (of {1}); last = {2}\".format(k, loops, len(last)))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v", "def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n 
for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return result", "def sieve(upto):\n return list(prime_numbers(upto))", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def get_primes(s):\n primes = bytearray([1] * s)\n for i in range(2, s):\n if primes[i] == 1:\n for j in range(i, s):\n if i * j < s:\n primes[i * j] = 0\n else:\n break\n return primes", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def get_primes_list(start, end):\r\n primes_list_obj = PrimesList(start, end)\r\n primes_list = primes_list_obj.primes_list()\r\n return primes_list", "def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes", "def primesfrom2to(n):\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def solve_euler7():\n\n\tprime_numbers = [2]\n\ti=3\n\n\twhile len(prime_numbers) < 10001:\n\t\tif is_prime(i) == True:\n\t\t\tprime_numbers.append(i)\n\t\ti+=1\n\t#return len(prime_numbers)\t\n\treturn prime_numbers[10000]", "def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list", "def factor_base(B):\n prime_list = list(range(2, B))\n idx = 0\n while idx < len(prime_list):\n prime = prime_list[idx]\n prime_list = prime_list[:idx + 1] + [n for n in prime_list[idx + 1:] if n % prime != 0]\n idx += 1\n return prime_list", "def gen_prime(start, end):\n if start <= 0 or end <= 0:\n raise Exception(\"Start and end values must be greater than zero.\")\n\n if start >= end:\n raise Exception(\"The start value cannot be greater than or equal to the end value\")\n\n prime_list = []\n\n for prime in range(start, end):\n if is_prime(prime):\n prime_list.append(prime)\n return prime_list", "def primesfrom2to(n):\n sieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def primesfrom2to( n ):\n sieve = numpy.ones( n / 3 + ( n % 6 == 2 ), dtype = numpy.bool )\n for i in range( 1, int( ( n ** 0.5 ) / 3 ) + 1 ):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ k * k / 3 ::2 * k] = False\n sieve[k * ( k - 2 * ( i & 1 ) + 4 ) / 3::2 * k] = False\n return numpy.r_[2, 3, ( ( 3 * numpy.nonzero( sieve )[0][1:] + 1 ) | 1 )]", "def primes(count):\n\n # START SOLUTION\n\n primes = []\n num = 2\n\n while count > 0:\n\n if is_prime(num):\n 
primes.append(num)\n count -= 1\n\n num += 1\n\n return primes", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def sieve(endNum):\n\n logger.debug(\"EndNum is %i\" % endNum)\n prime = [True] * (endNum+1)\n primesList = []\n for candidate in range(2, endNum):\n #If number is prime, remove multiples\n if(prime[candidate]):\n primesList.append(candidate)\n for removeNum in range(candidate*2, endNum, candidate):\n prime[removeNum] = False\n else:\n pass\n return primesList", "def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n", "def primes(numOfPrimes):\n\n primes = []\n # we want to start at 2003, which is the first prime after 2000, seeing as\n # we absolutely need to fit all 2000 keys on the hash table,\n i = 2003\n\n while len(primes) < numOfPrimes:\n isPrime = True\n\n for k in range(2, i):\n if i % k == 0:\n isPrime = False\n break\n\n if isPrime:\n primes.append(i)\n i += 1\n\n return primes", "def n_primes(n):\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,\n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,\n 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,\n 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,\n 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,\n 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,\n 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,\n 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,\n 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,\n 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,\n 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,\n 953, 967, 971, 977, 983, 991, 997][:n]\n\n if len(primes) < n:\n big_number = 2000\n while 'Not enough primes':\n primes = primes_from_2_to(big_number)[:n]\n if len(primes) == n:\n break\n big_number += 1000\n\n return primes", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def getKBitPrimes(k = 2 ** 10, n = 2 ** 20):\n\n lim = min(k + 1, n + 1) # we don't want to generate any primes larger than n\n\n numList = [True] * lim # initialise boolean list\n primes = [] # initialise list of primes\n\n for i in range(2, lim): # loop through list from index 2\n if numList[i]: # if it is True\n primes.append(i) # must be prime\n\n for j in range(i*i, lim, i): # loop through multiples\n numList[j] = False # setting them to false\n\n return primes # return ptimes", "def truncatable_primes():\n list_tp = []\n i = 8\n while len(list_tp) < 11:\n if is_truncatable(i):\n list_tp.append(i)\n i += 1\n if i % 100 == 0:\n print(\"i : \", i)\n return list_tp, sum(list_tp)", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n 
sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def sieve(max):\n\tprimes = [False]*max\n\tfor i in range(2, int(math.sqrt(len(primes)))):\n\t\tif primes[i] == False:\n\t\t\tfor j in range(i*i, max, i):\n\t\t\t\tprimes[j] = True\n\tcount = 0\n\tprint(\"Prime numbers under \", max, \":\", sep='')\n\tfor j in range(2, max):\n\t\tif primes[j] == False:\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tprint(j)\n\t\t\telse:\n\t\t\t\tprint(j, end='\\t')\n\tprint()", "def n_length_primes(n):\n assert n > 0, \"Cannot generate a list of %d length primes.\" % n\n a = []\n for i in range(10**(n-1), 10**n):\n if is_prime(i):\n a.append(str(i))\n return a", "def main():\n limit = 1000\n max_primes = 0\n max_b, max_c = 0, 0\n is_prime = sieve_of_eratosthenes_bool(limit * 100)\n primes = sieve_of_eratosthenes(limit)\n for c in primes:\n for b in range(-c, limit, 2):\n for n in count(1):\n res = n * n + b * n + c\n if res < 1 or not is_prime[res]:\n if max_primes < n:\n max_primes = n\n max_b, max_c = b, c\n print(max_primes, max_b, max_c, end='\\n')\n break\n print(max_b, max_c, max_b * max_c)", "def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res", "def thousand_first_primes() -> List[int]:\n primes = []\n i = 0\n while len(primes) != 1000:\n primes += [i] if premier(i) else []\n i += 1\n return primes", "def primes(max_number_of_primes) -> iter:\n number_primes = count(1)\n prime = prime_generator()\n while next(number_primes) <= max_number_of_primes:\n yield next(prime)", "def find_primes_up_to(up_to=100):\n primes = []\n for i in range(2, up_to + 1):\n if is_prime(i):\n primes.append(i)\n return primes", "def get_n_primes(n):\n\n primes = [' ']\n num = 2\n while len(primes) < n + 1:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums", "def getMyChunkSize(numJobs, numWorkers, chunkSize, rank):\n print \"numJobs, numWorkers: \", numJobs, numWorkers, chunkSize\n assert(numJobs >= numWorkers)\n allJobs = np.arange(numJobs)\n startInd = (np.arange(numWorkers)) * chunkSize\n endInd = (np.arange(numWorkers) + 1) * chunkSize\n endInd[-1] = numJobs\n myJobs = allJobs[startInd[rank]:endInd[rank]]\n return myJobs", "def primesfrom2to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in 
range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def prime_numbers(x: int):\n A = [True] * x\n A[0] = A[1] = False\n for i in range(2, x, 1):\n if is_simple_number(i):\n for m in range(2 * i, x, i):\n A[m] = False\n n = 0\n for k in range(x):\n print(k, \"is prime\" if A[k] else \"is not prime\")\n if A[k]:\n n += 1\n\n B = [0] * n\n n = 0\n for k in range(x):\n if A[k]:\n B[n] = k\n n += 1\n return B", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def primeGen(n):\n primes = [2, 3, 5, 7, 11]\n if n in xrange(1, len(primes) + 1):\n return primes[:n]\n else:\n banlist = []\n count = 6\n while count <= n:\n Next = (primes[-2] + primes[-1]) - primes[-3]\n if not is_prime(Next):\n count -= 1\n banlist.append(Next)\n count += 1\n primes.append(Next)\n filterout(banlist, primes)\n return primes", "def get_primes(n):\n primes = [True] * (n / 2)\n for i in range(int((n / 2 - 1) / 2) >> 1):\n for j in range((i * (i + 3) << 1) + 3, n / 2, (i << 1) + 3): \n primes[j] = False\n return [2] + [((i << 1) + 3) for i in range(n / 2) if (primes[i])]" ]
[ "0.7772032", "0.75551635", "0.754886", "0.71958756", "0.7041358", "0.6890954", "0.6870041", "0.67170656", "0.66442776", "0.6626837", "0.65599114", "0.63915014", "0.6374505", "0.63370544", "0.6328696", "0.6316949", "0.6307289", "0.62010443", "0.6167947", "0.61109275", "0.6107259", "0.60967153", "0.60713685", "0.605898", "0.6058528", "0.6020613", "0.6013907", "0.6006447", "0.5991404", "0.5925827", "0.59214014", "0.58951557", "0.58673596", "0.58389634", "0.5830671", "0.5783098", "0.57782006", "0.57732815", "0.57678086", "0.57637054", "0.5745385", "0.5736291", "0.5728561", "0.5726872", "0.5724385", "0.5707298", "0.5705281", "0.56971216", "0.5691928", "0.5684958", "0.56790006", "0.56780636", "0.56631595", "0.565761", "0.5641129", "0.563995", "0.5639008", "0.56379926", "0.56287426", "0.56258404", "0.55963314", "0.5590206", "0.5581488", "0.55741245", "0.5567183", "0.555614", "0.5554013", "0.5549585", "0.5541211", "0.5534245", "0.55330426", "0.5523821", "0.55153376", "0.5504511", "0.54976434", "0.54965025", "0.54918075", "0.54918075", "0.54875374", "0.5486914", "0.5481609", "0.5478285", "0.54764044", "0.5474015", "0.5473872", "0.547265", "0.54681915", "0.54656076", "0.545689", "0.545683", "0.54560405", "0.54544616", "0.5453182", "0.5451258", "0.54412216", "0.543847", "0.5438311", "0.54337084", "0.54244614", "0.5412289" ]
0.8527422
0
Takes a limit as an integer and gets all the prime numbers in that range, NOT including the limit itself. Returns a numpy array of the primes.
def get_primes_in(limit):
    range_limit = np.arange(limit)
    prime_mask = np.ones(limit, dtype=bool)
    prime_mask[0:2] = False
    for i in range_limit[:int(np.sqrt(limit))+1]:
        if prime_mask[i]:
            prime_mask[2*i::i] = False
    return range_limit[prime_mask]
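A minimal usage sketch, not part of the original record: it assumes numpy is imported as np and that the get_primes_in function above is in scope.

    import numpy as np  # get_primes_in above depends on np

    print(get_primes_in(30))
    # -> [ 2  3  5  7 11 13 17 19 23 29]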
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)", "def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes", "def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes", "def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)", "def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret", "def get_primes_in_big_limit(limit, fragmentation=1):\n print(\"Getting primes...\")\n print(\"Fragmentation set to\", fragmentation)\n fragment_limit = int(np.sqrt(limit))\n fragment_lowest = 0\n fragment_highest = fragment_lowest + fragment_limit\n primes_in_limit = np.array([], dtype=int)\n while fragment_highest < limit:\n if fragment_lowest == 0:\n fragment_highest += 1\n primes_in_first_fragment = get_primes_in(fragment_highest)\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_first_fragment],\n axis=None)\n else:\n primes_in_fragment = get_primes_in_fragment(fragment_lowest,\n fragment_highest,\n primes_in_first_fragment\n )\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_fragment],\n axis=None)\n fragment_lowest = fragment_highest\n fragment_highest += (fragment_limit * fragmentation)\n primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,\n limit+1,\n primes_in_first_fragment\n )\n return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)", "def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes", "def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]", "def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1", "def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * 
i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]", "def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]", "def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes", "def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)", "def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]", "def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p", "def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. 
The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers", "def get_primes(n):\n\n return list(primes_sieve(n))", "def prime_numpy_version(n: int) -> List[int]:\n arm = range(2, np.floor(n / 2).astype(int) + 1)\n x, y = np.meshgrid(*([arm] * 2))\n\n Z = range(2, n + 1)\n D = x * y\n Diff = np.setdiff1d\n\n P = Diff(Z, D[D <= n].ravel())\n return P.tolist()", "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes", "def sieve(upto):\n return list(prime_numbers(upto))", "def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]", "def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set", "def get_primes(maxi):\n\n is_prime = [True] * (maxi + 1)\n \n is_prime[0] = False\n is_prime[1] = False\n # is_prime[2] = True and all other even numbers are not prime\n for i in range(2,maxi+1):\n if is_prime[i]: # if current is prime, set multiples to current not prime\n for j in range(2*i, maxi+1, i):\n is_prime[j] = False\n\n return is_prime", "def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes", "def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in 
range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]", "def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list", "def primi(n):\n numVec = []\n for x in range(n-1):\n numVec.append(x+2)\n for num in numVec[:(n//2-1)]:\n if numVec[num-2] != 0:\n numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)\n numVec = [x for x in numVec if x!=0]\n return numVec", "def getListOfPrimes(k = 40, n = 1000000):\n\n low = 2 ** (k - 1) # smallest number k bits could be\n lim = min(int(math.sqrt(low)), n + 1) # we don't want to generate any primes larger than n\n\n numList = [True] * lim # initialise boolean list\n primes = [] # initialise list of primes\n\n for i in range(2, lim): # loop through list from index 2\n if numList[i]: # if it is True\n primes.append(i) # must be prime\n\n for j in range(i*i, lim, i): # loop through multiples\n numList[j] = False # setting them to false\n\n return primes # return ptimes", "def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)", "def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if 
sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def getNPrime(num):\n prime_numbers = []\n for i in range(num):\n if isPrime(i + 1):\n prime_numbers.append(i)\n return prime_numbers", "def num_array(lower_limit = 0, upper_limit = 5, increment = 1):\n numbers = []\n while lower_limit < upper_limit:\n numbers.append(lower_limit)\n lower_limit += increment\n return numbers", "def generate_primes(L):\n # We need to compute the Bound of the factor set.\n i = 0\n list_p = []\n for p in prime_sieve():\n i += 1\n list_p.append(p)\n if i >= L:\n break\n return list_p", "def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result", "def linear_sieve(max_n):\n smallest_factors = [0] * max_n\n primes = []\n\n for i in range(2, max_n):\n if smallest_factors[i] == 0:\n smallest_factors[i] = i\n primes.append(i)\n\n for p in primes:\n if p > smallest_factors[i] or i * p >= max_n:\n break\n smallest_factors[i * p] = p\n return primes, smallest_factors", "def prime_numbers(max_number_eval=100):\n prime_numbers_list = list(next_prime(max_number_eval))\n print('The prime numbers from 2 to {} are:{}'.format(max_number_eval, prime_numbers_list))", "def find_truncatable_primes(limit: int, start_from: int):\n\n start_time = time.time()\n truncatable = set()\n next_prime = primes_generator_iterable(start_from)\n\n while len(truncatable) < limit:\n\n prime = next(next_prime)\n if is_truncatable(prime):\n truncatable.add(prime)\n\n result = sum(truncatable)\n print_time_log(start_time, result)\n return result", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def primes(count):\n\n # START SOLUTION\n\n primes = []\n num = 2\n\n while count > 0:\n\n if is_prime(num):\n primes.append(num)\n count -= 1\n\n num += 1\n\n return primes", "def prime_number_2d_array(self):\n\n prime_list = utility_obj.get_prime()\n row = 10\n column = 25\n limit = 100\n\n two_d_array = [[0 for j in range(column)] for i in range(row)]\n\n k = 0\n for i in range(row):\n\n for j in range(column):\n\n if k < len(prime_list):\n if prime_list[k] <= limit:\n two_d_array[i][j] = prime_list[k]\n k += 1\n\n limit += 100\n\n for i in range(row):\n\n for j in range(column):\n\n if two_d_array[i][j] != 0:\n print(two_d_array[i][j], end=\" \")\n\n print()", "def get_prime_array(number_of_primes) -> array:\n p = array('i')\n with open(f'prime{number_of_primes}.bin', 'rb') as prime_file:\n p.fromfile(prime_file, number_of_primes) \n return p", "def numpy_sieve(num):\n # array of True values for sieve\n primes = np.ones(num, dtype=bool)\n\n # 0 and 1 are not prime\n primes[0] = primes[1] = False\n\n # filter out non-prime values\n for i in range(2, int(np.sqrt(num) + 1)):\n if primes[i]:\n primes[i * i :: i] = False\n\n # extract prime numbers\n primes = np.flatnonzero(primes)\n\n return primes", "def list_primes(n):\n primeList = []\n for i in range(n):\n if is_prime(i):\n primeList.append(i)\n return primeList", "def get_primes_list(start, end):\r\n primes_list_obj = PrimesList(start, end)\r\n primes_list = primes_list_obj.primes_list()\r\n return primes_list", "def find_primes_up_to(up_to=100):\n primes = []\n for i in range(2, up_to + 1):\n if is_prime(i):\n primes.append(i)\n return primes", "def 
mult_parities_python(bound, verbose=False):\n v = [None] * bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound, 2)) + 1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity + int(1)) % int(2)\n if verbose:\n print(\"loop {0} (of {1}); last = {2}\".format(k, loops, len(last)))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v", "def primes(n, DEBUG=False):\n\n return [x[0] for x in enumerate(_sieve(n, DEBUG=DEBUG)[0:n+1]) if x[1]]", "def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums", "def n_length_primes(n):\n assert n > 0, \"Cannot generate a list of %d length primes.\" % n\n a = []\n for i in range(10**(n-1), 10**n):\n if is_prime(i):\n a.append(str(i))\n return a", "def primes(n):\n return [i for i, v in enumerate(prime_cache(n)) if v]", "def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key", "def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes", "def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return result", "def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n", "def get_n_primes(n):\n\n primes = [' ']\n num = 2\n while len(primes) < n + 1:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes", "def prime_numbers(x: int):\n A = [True] * x\n A[0] = A[1] = False\n for i in range(2, x, 1):\n if is_simple_number(i):\n for m in range(2 * i, x, i):\n A[m] = False\n n = 0\n for k in range(x):\n print(k, \"is prime\" if A[k] else \"is not prime\")\n if A[k]:\n n += 1\n\n B = [0] * n\n n = 0\n for k in range(x):\n if A[k]:\n B[n] = k\n n += 1\n return B", "def thousand_first_primes() -> List[int]:\n primes = []\n i = 0\n while len(primes) != 1000:\n primes += [i] if premier(i) else []\n i += 1\n return primes", "def get_primes(n):\n primes = [True] * (n / 2)\n for i in range(int((n / 2 - 1) / 2) >> 1):\n for j in range((i * (i + 3) << 1) + 3, n / 2, (i << 1) + 3): \n primes[j] = False\n return [2] + [((i << 1) + 3) for i in range(n / 2) if (primes[i])]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def gen_prime(start, end):\n if start <= 0 or end <= 0:\n raise Exception(\"Start and end values must be greater than zero.\")\n\n if start >= end:\n raise Exception(\"The start value cannot be 
greater than or equal to the end value\")\n\n prime_list = []\n\n for prime in range(start, end):\n if is_prime(prime):\n prime_list.append(prime)\n return prime_list", "def getKBitPrimes(k = 2 ** 10, n = 2 ** 20):\n\n lim = min(k + 1, n + 1) # we don't want to generate any primes larger than n\n\n numList = [True] * lim # initialise boolean list\n primes = [] # initialise list of primes\n\n for i in range(2, lim): # loop through list from index 2\n if numList[i]: # if it is True\n primes.append(i) # must be prime\n\n for j in range(i*i, lim, i): # loop through multiples\n numList[j] = False # setting them to false\n\n return primes # return ptimes", "def return_prime_numbers_less_tahn_100():\r\n primes = []\r\n for num in range(100):\r\n is_prime = True\r\n for i in range(2, num):\r\n if num % i == 0:\r\n is_prime = False \r\n if is_prime:\r\n primes.append(num)\r\n return primes", "def primesfrom2to(n):\n sieve = numpy.ones(n//3 + (n%6 == 2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def omega_primes(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(2, upper + 1):\n if nums[i] != 0: continue\n for j in range(i, upper + 1, i):\n nums[j] += 1\n return nums", "def list_primes(n):\n\tarr = [True] * n\n\tarr[0] = False\n\tarr[1] = False\n\tfor i in range(2, int(math.sqrt(n)) + 1):\n\t\tif is_prime(i):\n\t\t\tfor j in range(2 * i, n, i):\n\t\t\t\tarr[j] = False\n\tprimes = []\n\tfor i in range(len(arr)):\n\t\tif arr[i]:\n\t\t\tprimes.append(i)\n\treturn primes", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def list_primes(number):\n sieve = [True] * (number // 2)\n for i in range(3, int(number ** 0.5) + 1, 2):\n if sieve[i // 2]:\n sieve[i * i // 2::i] = [False] * ((number - i * i - 1) // (2 * i) + 1)\n return [2] + [2 * i + 1 for i in range(1, number // 2) if sieve[i]]", "def find_limits(self, \n label: str, \n limit: list,\n ) -> np.array :\n ci = np.array([])\n cdf = self.cdf[label]\n\n for l in limit:\n fintp = interp1d(cdf[0], cdf[1] - l, kind='linear')\n ci = np.append(ci, scipy.optimize.brentq(fintp, np.min(cdf[0]), \n np.max(cdf[0])))\n return ci", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n if n == 0 or n == 1:\n return []\n else:\n p = primes(int(sqrt(n)))\n no_p = { j for i in p for j in xrange(i*2, n+1, i) }\n p = { x for x in xrange(2, n + 1) if x not in no_p }\n return p", "def sieve(self, upto_num):\n max_cur_known = self.max_known_number()\n \n num_new = upto_num - max_cur_known\n #All new numbers are primes until they are crossed off\n self.number_list.extend(array.array('b', [1])*num_new)\n \n for marker_num in range(2, maths.floor(maths.sqrt(upto_num)) + 1):\n #For efficiency only use prime 
marked numbers\n if not self.is_prime(marker_num):\n continue\n \n min_x = max(max_cur_known // marker_num + 1, marker_num)\n max_x = upto_num // marker_num\n \n for x in range(min_x, max_x + 1):\n self.number_list[marker_num*x] = 0 # Non-prime", "def primesList(n):\n sieve = [True]*n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[2*i::i] = [False]*(len(sieve[2*i::i]))\n return [2]+[i for i in range(3,n,2) if sieve[i]]", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primesupto(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def primes(numOfPrimes):\n\n primes = []\n # we want to start at 2003, which is the first prime after 2000, seeing as\n # we absolutely need to fit all 2000 keys on the hash table,\n i = 2003\n\n while len(primes) < numOfPrimes:\n isPrime = True\n\n for k in range(2, i):\n if i % k == 0:\n isPrime = False\n break\n\n if isPrime:\n primes.append(i)\n i += 1\n\n return primes", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num", "def primesfrom2to( n ):\n sieve = numpy.ones( n / 3 + ( n % 6 == 2 ), dtype = numpy.bool )\n for i in range( 1, int( ( n ** 0.5 ) / 3 ) + 1 ):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ k * k / 3 ::2 * k] = False\n sieve[k * ( k - 2 * ( i & 1 ) + 4 ) / 3::2 * k] = False\n return numpy.r_[2, 3, ( ( 3 * numpy.nonzero( sieve )[0][1:] + 1 ) | 1 )]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def get_neighbor_idxes(x, n, limit):\n idxes = sorted(range(limit), key=lambda idx: (abs(x - idx), idx))[:n]\n idxes = sorted(idxes)\n return np.array(idxes)", "def get_prime_list(low, num):\r\n prime_gen = prime_generator()\r\n current_prime = 2\r\n\r\n while current_prime < low:\r\n current_prime = next(prime_gen)\r\n\r\n result = [current_prime]\r\n for i in range(num):\r\n result.append(next(prime_gen))\r\n\r\n return result", "def sieve(endNum):\n\n logger.debug(\"EndNum is %i\" % endNum)\n prime = [True] * (endNum+1)\n primesList = []\n for candidate in range(2, endNum):\n #If number is prime, remove multiples\n if(prime[candidate]):\n primesList.append(candidate)\n for removeNum in range(candidate*2, endNum, candidate):\n prime[removeNum] = False\n else:\n pass\n 
return primesList", "def sieve(n):\n\n primes = []\n sieve = [0] * n\n\n for i in range(2, n):\n if sieve[i] == 0:\n primes.append(i)\n sieve[i*i:n:i] = [1] * slice_length(i*i, n, i)\n\n return primes", "def primesfrom2to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def primes(count):\n\n prime_list = []\n num = 2\n\n while count > 0:\n\n if prime_checker(num):\n prime_list.append(num)\n count -= 1\n num += 1\n\n return prime_list", "def primesfrom2to(n):\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primes(n):\n sieve = bytearray([True]) * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)\n return [2,*compress(range(3,n,2), sieve[1:])]" ]
[ "0.80535877", "0.7970838", "0.7566367", "0.75646406", "0.74854904", "0.7452761", "0.73869497", "0.7217808", "0.72028553", "0.7137117", "0.69901395", "0.69098914", "0.6839922", "0.66989404", "0.6691934", "0.6605368", "0.65635985", "0.64167565", "0.6413339", "0.6396981", "0.63700086", "0.6362434", "0.63288206", "0.6313341", "0.63128763", "0.62797415", "0.62711865", "0.6261318", "0.6250775", "0.6234691", "0.62289125", "0.62276596", "0.6224899", "0.62241375", "0.6218659", "0.62177527", "0.6214356", "0.6188504", "0.6185936", "0.61830306", "0.61712575", "0.6163631", "0.6149631", "0.612563", "0.6120957", "0.61038", "0.60971576", "0.60811394", "0.6057837", "0.6050373", "0.6032851", "0.6017206", "0.5989161", "0.59819674", "0.5973628", "0.59728223", "0.5967177", "0.59568995", "0.59472364", "0.59462065", "0.5944411", "0.5940858", "0.5903159", "0.5892396", "0.58906806", "0.58826816", "0.5858235", "0.58565557", "0.5850171", "0.5848875", "0.58411026", "0.5838847", "0.58287305", "0.58276385", "0.58215016", "0.58158773", "0.5804695", "0.57965916", "0.5793912", "0.578901", "0.57874244", "0.5779875", "0.57711554", "0.5771127", "0.57596356", "0.57596356", "0.57515264", "0.57513094", "0.57465416", "0.57388127", "0.57317877", "0.57302713", "0.5728817", "0.5724878", "0.5723044", "0.5721612", "0.5719526", "0.57175374", "0.57144433", "0.5711545" ]
0.846045
0
Takes fragment lowest and highest limits as integers and gets all the prime numbers in that range, NOT including the highest limit itself. Returns a numpy array of the primes. Needs the primes from the first fragment of the program as input.
def get_primes_in_fragment(fragment_lowest, fragment_highest, primes_in_first_fragment):
    fragment_range = np.arange(fragment_lowest, fragment_highest)
    prime_mask = np.ones(len(fragment_range), dtype=bool)
    for p in primes_in_first_fragment:
        if fragment_lowest % p == 0:
            first_multiple = fragment_lowest // p
        else:
            first_multiple = fragment_lowest // p + 1
        first_multiple_index = first_multiple * p - fragment_lowest
        prime_mask[first_multiple_index::p] = False
    return fragment_range[prime_mask]
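A minimal combined sketch, not part of the original record. Assumptions: get_primes_in from the previous record is in scope to seed the base primes, and every base prime is smaller than fragment_lowest, which any segmented-sieve driver must guarantee for the first-multiple arithmetic to mark only true composites.

    import numpy as np

    base_primes = get_primes_in(11)  # primes below sqrt(100): [2 3 5 7]
    print(get_primes_in_fragment(50, 100, base_primes))
    # -> [53 59 61 67 71 73 79 83 89 97]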
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_primes_in_big_limit(limit, fragmentation=1):\n print(\"Getting primes...\")\n print(\"Fragmentation set to\", fragmentation)\n fragment_limit = int(np.sqrt(limit))\n fragment_lowest = 0\n fragment_highest = fragment_lowest + fragment_limit\n primes_in_limit = np.array([], dtype=int)\n while fragment_highest < limit:\n if fragment_lowest == 0:\n fragment_highest += 1\n primes_in_first_fragment = get_primes_in(fragment_highest)\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_first_fragment],\n axis=None)\n else:\n primes_in_fragment = get_primes_in_fragment(fragment_lowest,\n fragment_highest,\n primes_in_first_fragment\n )\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_fragment],\n axis=None)\n fragment_lowest = fragment_highest\n fragment_highest += (fragment_limit * fragmentation)\n primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,\n limit+1,\n primes_in_first_fragment\n )\n return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)", "def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]", "def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)", "def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes", "def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)", "def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes", "def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]", "def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes", "def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set", "def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]", "def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while 
len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes", "def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes", "def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]", "def linear_sieve(max_n):\n smallest_factors = [0] * max_n\n primes = []\n\n for i in range(2, max_n):\n if smallest_factors[i] == 0:\n smallest_factors[i] = i\n primes.append(i)\n\n for p in primes:\n if p > smallest_factors[i] or i * p >= max_n:\n break\n smallest_factors[i * p] = p\n return primes, smallest_factors", "def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. 
The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers", "def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]", "def get_primes_list(start, end):\r\n primes_list_obj = PrimesList(start, end)\r\n primes_list = primes_list_obj.primes_list()\r\n return primes_list", "def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n", "def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)", "def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num", "def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret", "def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p", "def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]", "def mult_parities_python(bound, verbose=False):\n v = [None] * bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound, 2)) + 1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity + int(1)) % int(2)\n if verbose:\n print(\"loop {0} (of {1}); last = {2}\".format(k, loops, len(last)))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v", "def primi(n):\n numVec = []\n for x in range(n-1):\n numVec.append(x+2)\n for num in numVec[:(n//2-1)]:\n if numVec[num-2] != 0:\n numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)\n numVec = [x for x in numVec if x!=0]\n return numVec", "def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1", "def 
get_primes(s):\n primes = bytearray([1] * s)\n for i in range(2, s):\n if primes[i] == 1:\n for j in range(i, s):\n if i * j < s:\n primes[i * j] = 0\n else:\n break\n return primes", "def get_primes(maxi):\n\n is_prime = [True] * (maxi + 1)\n \n is_prime[0] = False\n is_prime[1] = False\n # is_prime[2] = True and all other even numbers are not prime\n for i in range(2,maxi+1):\n if is_prime[i]: # if current is prime, set multiples to current not prime\n for j in range(2*i, maxi+1, i):\n is_prime[j] = False\n\n return is_prime", "def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes", "def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def get_prime_array(number_of_primes) -> array:\n p = array('i')\n with open(f'prime{number_of_primes}.bin', 'rb') as prime_file:\n p.fromfile(prime_file, number_of_primes) \n return p", "def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]", "def gen_prime(start, end):\n if start <= 0 or end <= 0:\n raise Exception(\"Start and end values must be greater than zero.\")\n\n if start >= end:\n raise Exception(\"The start value cannot be greater than or equal to the end value\")\n\n prime_list = []\n\n for prime in range(start, end):\n if is_prime(prime):\n prime_list.append(prime)\n return prime_list", "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n 
sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])", "def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def omega_primes(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(2, upper + 1):\n if nums[i] != 0: continue\n for j in range(i, upper + 1, i):\n nums[j] += 1\n return nums", "def primes(n, DEBUG=False):\n\n return [x[0] for x in enumerate(_sieve(n, DEBUG=DEBUG)[0:n+1]) if x[1]]", "def sieve(upto):\n return list(prime_numbers(upto))", "def prime_numpy_version(n: int) -> List[int]:\n arm = range(2, np.floor(n / 2).astype(int) + 1)\n x, y = np.meshgrid(*([arm] * 2))\n\n Z = range(2, n + 1)\n D = x * y\n Diff = np.setdiff1d\n\n P = Diff(Z, D[D <= n].ravel())\n return P.tolist()", "def prime_numbers(max_number_eval=100):\n prime_numbers_list = list(next_prime(max_number_eval))\n print('The prime numbers from 2 to {} are:{}'.format(max_number_eval, prime_numbers_list))", "def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def primes(n):\n sieve = bytearray([True]) * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)\n return [2,*compress(range(3,n,2), sieve[1:])]", "def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result", "def primesfrom2to(n):\n sieve = numpy.ones(n//3 + (n%6 == 2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def get_primes(n):\n primes = [True] * (n / 2)\n 
for i in range(int((n / 2 - 1) / 2) >> 1):\n for j in range((i * (i + 3) << 1) + 3, n / 2, (i << 1) + 3): \n primes[j] = False\n return [2] + [((i << 1) + 3) for i in range(n / 2) if (primes[i])]", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def generate_primes(L):\n # We need to compute the Bound of the factor set.\n i = 0\n list_p = []\n for p in prime_sieve():\n i += 1\n list_p.append(p)\n if i >= L:\n break\n return list_p", "def get_primes(n):\n\n return list(primes_sieve(n))", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def get_primes(self, startnum=2):\n i = startnum\n while True:\n if self.is_prime(i):\n yield i\n i += 1", "def prime_number_2d_array(self):\n\n prime_list = utility_obj.get_prime()\n row = 10\n column = 25\n limit = 100\n\n two_d_array = [[0 for j in range(column)] for i in range(row)]\n\n k = 0\n for i in range(row):\n\n for j in range(column):\n\n if k < len(prime_list):\n if prime_list[k] <= limit:\n two_d_array[i][j] = prime_list[k]\n k += 1\n\n limit += 100\n\n for i in range(row):\n\n for j in range(column):\n\n if two_d_array[i][j] != 0:\n print(two_d_array[i][j], end=\" \")\n\n print()", "def find_all_primes(x=22):\n allprimes = []\n for i in range(2, x + 1):\n ##allows all the numbers between 2(smallest prime) and x be divided by x \n if is_prime(i):\n #using the function that set up just now\n allprimes.append(i)\n #\n print(\"There are %d primes between 2 and %d\" % (len(allprimes), x))\n return allprimes", "def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]", "def sieve(self, upto_num):\n max_cur_known = self.max_known_number()\n \n num_new = upto_num - max_cur_known\n #All new numbers are primes until they are crossed off\n self.number_list.extend(array.array('b', [1])*num_new)\n \n for marker_num in range(2, maths.floor(maths.sqrt(upto_num)) + 1):\n #For efficiency only use prime marked numbers\n if not self.is_prime(marker_num):\n continue\n \n min_x = max(max_cur_known // marker_num + 1, marker_num)\n max_x = upto_num // marker_num\n \n for x in range(min_x, max_x + 1):\n self.number_list[marker_num*x] = 0 # Non-prime", "def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list", "def get_prime_list(low, num):\r\n prime_gen = prime_generator()\r\n current_prime = 2\r\n\r\n while current_prime < low:\r\n current_prime = next(prime_gen)\r\n\r\n result = [current_prime]\r\n for i in 
range(num):\r\n result.append(next(prime_gen))\r\n\r\n return result", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primesfrom2to( n ):\n sieve = numpy.ones( n / 3 + ( n % 6 == 2 ), dtype = numpy.bool )\n for i in range( 1, int( ( n ** 0.5 ) / 3 ) + 1 ):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ k * k / 3 ::2 * k] = False\n sieve[k * ( k - 2 * ( i & 1 ) + 4 ) / 3::2 * k] = False\n return numpy.r_[2, 3, ( ( 3 * numpy.nonzero( sieve )[0][1:] + 1 ) | 1 )]", "def primes_below(n):\n L, M = [2], [x for x in range(3, int(n), 2)]\n if n <= 2:\n print('There are no primes below 2')\n return None\n for i in range(3, int(n), 2):\n if M[i // 2 - 1] != 0 and is_prime(i):\n L.append(i)\n for j in range(i, int(n), 2 * i):\n M[j // 2 - 1] = 0\n return L", "def prime_gaps(maxp):\n P = prime_range(maxp + 1)\n return [P[i + 1] - P[i] for i in range(len(P) - 1)]", "def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key", "def primes(n):\n if n == 0 or n == 1:\n return []\n else:\n p = primes(int(sqrt(n)))\n no_p = { j for i in p for j in xrange(i*2, n+1, i) }\n p = { x for x in xrange(2, n + 1) if x not in no_p }\n return p", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primesfrom2to(n):\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primesfrom2to(n):\n sieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums", "def solve_euler7():\n\n\tprime_numbers = [2]\n\ti=3\n\n\twhile len(prime_numbers) < 10001:\n\t\tif is_prime(i) == True:\n\t\t\tprime_numbers.append(i)\n\t\ti+=1\n\t#return len(prime_numbers)\t\n\treturn prime_numbers[10000]", "def primesfrom2to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def primes(n: int) -> list:\n primes = [2]\n pot_primes = list(range(2,n+1))\n for number in pot_primes:\n for prime in primes:\n if number // prime == 0:\n continue\n else:\n primes += [x]\n\n\n print 
(primes)", "def primes(n):\n sqrtN=n**0.5\n odds=[2]\n odds+=[i for i in range(3,n) if i%2>0]\n\n for i in odds:\n if i!=0 and i<=sqrtN:\n for j in odds[odds.index(i)+1:]:\n if j%i==0:\n odds[odds.index(j)]=0\n return [i for i in odds if i!=0]", "def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n", "def proportion_of_primes(bound, **args):\n v = []\n k = 0.0\n for n in range(1, bound + 1):\n if is_prime(n):\n k += 1\n v.append((n, k / n))\n return plot_step_function(v, **args)", "def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False", "def sieve(upper=10**5):\n nums = [True] * (upper + 1)\n nums[0] = False\n nums[1] = False\n for i in range(2, upper + 1):\n if not nums[i]: continue\n for j in range(i * 2, upper + 1, i):\n nums[j] = False\n return nums", "def find_primes_up_to(up_to=100):\n primes = []\n for i in range(2, up_to + 1):\n if is_prime(i):\n primes.append(i)\n return primes", "def getNPrime(num):\n prime_numbers = []\n for i in range(num):\n if isPrime(i + 1):\n prime_numbers.append(i)\n return prime_numbers", "def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)", "def sieve_of_eratosthenes(n: int) -> List[int]:\n\n prime = [True for i in range(n+1)] #initiate array named prime with all value True, ie everynumber [0,n] are prime\n p = 2\n while (p * p <= n):\n # If prime[p] is not\n # changed, then it is a prime\n if (prime[p] == True): #if any number is prime then its multiple must be composite\n # Update all multiples of p to be not prime \n for i in range(p * p, n+1, p):\n prime[i] = False\n p += 1\n\n\n '''\n till here the status of code is:\n 0:prime\n 1:prime\n 2:prime\n 3:prime\n 5:prime\n 7:prime\n 11:prime\n .\n .\n .\n\n But 0 and 1 are not prime, so we will have to count numbers from 2\n '''\n\n return [i for i, p in enumerate(prime[2:], 2) if p]", "def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])", "def get_prime(origin):\n if origin < 2:\n raise ValueError(\"Please input more than 2 integer\")\n prime_number = []\n for i in range(2, origin):\n switch = True\n if not prime_number:\n prime_number.append(i)\n continue\n for n in prime_number:\n if i % n == 0:\n switch = False\n break\n if switch:\n prime_number.append(i)\n return prime_number", "def find_truncatable_primes(limit: int, start_from: int):\n\n start_time = time.time()\n truncatable = set()\n next_prime = primes_generator_iterable(start_from)\n\n while len(truncatable) < limit:\n\n prime = next(next_prime)\n if is_truncatable(prime):\n truncatable.add(prime)\n\n result = sum(truncatable)\n print_time_log(start_time, 
result)\n return result", "def all_primes(nums):\n return list(filter(lambda x: all(x % i != 0 for i in range(2, x)), nums))\n # assume is_prime is defined:\n # def is_prime(n):\n # return all(n % i != 0 for i in range(2, n))\n # return list(filter(is_prime, nums))", "def main():\n limit = 1000\n max_primes = 0\n max_b, max_c = 0, 0\n is_prime = sieve_of_eratosthenes_bool(limit * 100)\n primes = sieve_of_eratosthenes(limit)\n for c in primes:\n for b in range(-c, limit, 2):\n for n in count(1):\n res = n * n + b * n + c\n if res < 1 or not is_prime[res]:\n if max_primes < n:\n max_primes = n\n max_b, max_c = b, c\n print(max_primes, max_b, max_c, end='\\n')\n break\n print(max_b, max_c, max_b * max_c)", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def list_primes(number):\n sieve = [True] * (number // 2)\n for i in range(3, int(number ** 0.5) + 1, 2):\n if sieve[i // 2]:\n sieve[i * i // 2::i] = [False] * ((number - i * i - 1) // (2 * i) + 1)\n return [2] + [2 * i + 1 for i in range(1, number // 2) if sieve[i]]", "def primes(n):\n return [i for i, v in enumerate(prime_cache(n)) if v]", "def sieve(endNum):\n\n logger.debug(\"EndNum is %i\" % endNum)\n prime = [True] * (endNum+1)\n primesList = []\n for candidate in range(2, endNum):\n #If number is prime, remove multiples\n if(prime[candidate]):\n primesList.append(candidate)\n for removeNum in range(candidate*2, endNum, candidate):\n prime[removeNum] = False\n else:\n pass\n return primesList" ]
[ "0.7555909", "0.745268", "0.72320384", "0.6928359", "0.6911003", "0.6905713", "0.67729235", "0.6719026", "0.6628001", "0.66014606", "0.65840936", "0.6533467", "0.6412444", "0.64070624", "0.64064884", "0.63762397", "0.63470197", "0.6316131", "0.6301665", "0.6293129", "0.62916404", "0.628601", "0.62382615", "0.6158372", "0.6149993", "0.6138645", "0.60854316", "0.60767126", "0.60731333", "0.60654074", "0.6057109", "0.6053036", "0.60448366", "0.60419047", "0.60169226", "0.6008306", "0.6", "0.5995096", "0.59935504", "0.5972408", "0.59677905", "0.5962648", "0.59616065", "0.59413296", "0.59322894", "0.5911757", "0.59029365", "0.58970714", "0.58858925", "0.5843441", "0.5843399", "0.584023", "0.5832084", "0.58300704", "0.5828056", "0.58239204", "0.58182824", "0.58064747", "0.5792195", "0.5791793", "0.57821524", "0.5775391", "0.5772693", "0.5767305", "0.5764843", "0.5754233", "0.5742242", "0.57374", "0.57374", "0.5734251", "0.5733514", "0.57271904", "0.57228994", "0.5720872", "0.5720791", "0.5714437", "0.5709622", "0.5702802", "0.5698978", "0.56636274", "0.56529176", "0.5646323", "0.56392264", "0.56320274", "0.5631913", "0.5627791", "0.5625828", "0.56248856", "0.56110305", "0.55993104", "0.5599261", "0.55983293", "0.55960834", "0.5587983", "0.55862343", "0.5579373", "0.5579373", "0.55784297", "0.55782473", "0.5574048" ]
0.80316025
0
Takes a tuple where the first element is the dividend and the second element is the divisor. Both elements should be int. Performs a long division.
def long_division(dividend_divisor_tuple, decimal_limit=5): natural, decimal = [], [] dividend, divisor = dividend_divisor_tuple[0], dividend_divisor_tuple[1] assert isinstance(dividend, int), "Dividend not int" assert isinstance(divisor, int), "Divisor not int" floor_div = dividend // divisor rest = dividend % divisor # Natural part of the division while floor_div > 0: natural.append(str(floor_div)) dividend = rest floor_div = dividend // divisor rest = dividend % divisor if rest == 0: # Divisor is factor of dividend print("Divisor is factor of dividend") return ("".join(natural), None, None) # Decimal part of the division dividend_list = [] recurring_index = None while len(decimal) < decimal_limit: dividend_list.append(dividend) dividend *= 10 floor_div = dividend // divisor decimal.append(str(floor_div)) rest = dividend % divisor if rest == 0: # Terminating decimal reached return ("".join(natural), "".join(decimal), None) elif rest in dividend_list: # Recurring cycle found recurring_index = dividend_list.index(rest) print("Recurring cycle found") break else: dividend = rest if recurring_index is not None: recurring = decimal[recurring_index:] decimal = decimal[:recurring_index] return ("".join(natural), "".join(decimal), "".join(recurring)) else: print("Decimal limit reached") return ("".join(natural), "".join(decimal), None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def div_numbers(a: int, b: int) -> int:\n return a / b", "def test_div():\n l = [1, 2, 3, 4]\n assert s7.div(*l) == 1 / 2 / 3 / 4\n assert s7.div(100, 20) == 5\n assert s7.div(100.0, 20) == 5.0\n assert s7.div(100, 20.0) == 5.0", "def div(seq):\n for (i, n) in enumerate(seq):\n # try dividing this number with all others\n # (in fact, we can only consider the subsequent numbers,\n # and check the ratio both ways)\n for j in range(i+1, len(seq)):\n ratio1 = seq[j] / seq[i]\n ratio2 = seq[i] / seq[j]\n for result in [ratio1, ratio2]:\n # is the result an integer? if so, done\n if is_int(result):\n return int(result)", "def division_algo(a, b):\n return a / b, a % b", "def kkDiv(*args):\n if (None in args):\n return None\n quot = float(args[0]) / float(args[1])\n if (quot > 1):\n return quot\n else:\n return 1/quot", "def divider_ref(dividend, divisor):\n rom_size = 2**8\n rom = [0 for _ in range(rom_size)]\n rom = [0] + [int(round(((2**16)-1)/float(ii)))\n for ii in range(1, rom_size)]\n rom = tuple(rom)\n divisor_reciprocal = rom[divisor]\n if dividend < 0:\n dividend_d1 = -dividend\n else:\n dividend_d1 = dividend\n mult = (dividend_d1 * divisor_reciprocal)\n mult_s = mult/(2**16)\n if dividend < 0:\n mult_s = -mult_s\n round_ = int((mult/(2**15)) % 2)\n if round_ == 1:\n if dividend >= 0:\n mult_s = mult_s + 1\n else:\n mult_s = int(mult_s - 1)\n return int(mult_s)", "def dividir(value, arg):\n return int(value) /int(arg)", "def div(a,b):\r\n return a/b", "def finddiv(x):\r\n \r\n div = (1, x)\r\n for i in range(2, x//2+1):\r\n if x%i==0:\r\n div+=(i,)\r\n return div", "def longDiv(c, e, n):\n if n == 0:\n return Scientific(c, e)\n else:\n # TODO: Use a logarithm here!\n # TODO: Can't use tail recursion like this in python!\n if n < d:\n return longDiv(c * 10, e - 1, n * 10)\n else:\n (q, r) = quotRemInteger(n, d)\n return longDiv(c+q, e, r)", "def division(a, b):\n return (a // b, a / b)", "def div(self, a, b):\n return (a / b, a % b)", "def div(x, y):\n return x / y", "def lcm(a: int, b: int) -> int:\n return (a * b) // gcd(a, b)", "def int_div_inplace(a, b):", "def multiple(a, b):\n from fractions import gcd\n def lcm(x,y):\n \treturn (x*y)//gcd(x,y)\n #return lcm(a,b)\n \n def gcd(x,y):\n if y > x:\n x, y = y, x\n while y != 0:\n x, y = y, x % y\n return x\n return (a*b) // gcd(a,b)", "def _lcm_f(a, b):\n return int((a * b) / _gcd_f(a, b))", "def divide(self, dividend, divisor):\n MAX_INT = 0x7FFFFFFF\n MIN_INT = 0x80000000\n\n if divisor == 0:\n return MAX_INT\n\n sign = 1 if dividend > 0 and divisor > 0 or dividend < 0 and divisor < 0 else -1\n dividend, divisor = abs(dividend), abs(divisor)\n res = 0\n while dividend >= divisor:\n pow2 = 1\n tmp = divisor\n while dividend >= tmp:\n tmp <<= 1\n pow2 <<= 1\n tmp >>= 1\n pow2 >>= 1\n dividend -= tmp\n res += pow2\n \n res = sign * res\n return res if res <= MAX_INT else MAX_INT", "def divide(num):\n return (int(num / 2))", "def divisors(n):\n return tuple(_divisor_gen(n))", "def divide(*args):\n body = ['<h1>Divison Calculator</h1>']\n try:\n quotient = reduce(lambda x,y: x / y, map(int,args))\n body.append(f'Total equals: {quotient}')\n except ZeroDivisionError:\n raise ZeroDivisionError\n return '\\n'.join(body)", "def divide(self, dividend: int, divisor: int) -> int:\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << (shift + 1):\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)", "def 
ceil_intdiv(a, b):\r\n # If a and b are int with not many significant bits, we could\r\n # cast them to float to avoid doing the modulo. We do not know if this\r\n # is faster or not. But this is not safe for int64 as the cast will\r\n # lose precision.\r\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))\r\n\r\n # We cast for the case when a and b are uint*. Otherwise neq will\r\n # force their upcast to int.\r\n div = int_div(a, b)\r\n ret = cast(neq(a % b, 0), div.dtype) + div\r\n assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])\r\n return ret", "def divide(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n quotient = str(args[0] / args[1])\n return quotient", "def div(a: Decimal, b: Decimal) -> Decimal:\n return a / b", "def division(a, b):\n if b != 0:\n return a//b", "def lcm(a: int, b: int):\n return (a * b) // euclid(a, b)", "def div_value(self, lv, rv):", "def div(a, b):\n a = float(a)\n b = float(b)\n return a / b", "def ceildiv(a, b):\n return - (-a // b)", "def divide(number_1, number_2):\n return int(number_1) / float(number_2)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def even_quotient(nums: list) -> int:\n for i in range(len(nums[:-1])):\n for j in range(i + 1, len(nums)):\n if nums[i] % nums[j] == 0:\n return nums[i] // nums[j]\n elif nums[j] % nums[i] == 0:\n return nums[j] // nums[i]", "def lcm(a, b):\n if not isinstance(a, int):\n a = int(a)\n if not isinstance(b, int):\n b = int(b)\n return abs(a*b) / gcd(a, b)", "def divisor_num(x):\n factor_pow = map(lambda y: y + 1, factorint(x).values())\n div_num = reduce(mul, factor_pow)\n return div_num", "def divide_exact(n,d):\n return floordiv(n,d),mod(n,d)", "def div(num1, num2):\n return num1 / num2", "def div(num1, num2):\n return num1 / num2", "def _ceil_div(value, block):\n return (value + block - 1) // block", "def floor_div(a, b):\r\n # see decorator for function body\r", "def h_d_n(x:int) -> tuple:\n return(x // 100, (x % 100) // 10, x % 10)", "def greatest_common_divisor(a: int, b: int) -> int:\n#[SOLUTION]\n while b:\n a, b = b, a % b\n return a", "def beat_division(a,b):\n if b == 0:\n return 0\n return a // b", "def div1(left: float, right: float) -> float:\n return left / right", "def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def divup(a, b):\n return (a + b - 1) // b", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def longDivNoLimit(c, e, ns, n):\n if n == 0:\n return (Scientific(c, e), Nothing)\n else:\n e_prime = map.lookup(n, ns)\n if isJust(e_prime):\n return (Scientific(c, e), maybe.map(negate, e_prime))\n elif n < rat.denominator:\n return longDivNoLimit(c * 10, e - 1, map.insert(n, e, ns), n * 10)\n else:\n (q, r) = quotRemInteger(n, rat.denominator)\n return longDivNoLimit(c + q, e, ns, r)", "def lcm(*args):\r\n\treturn functools.reduce(lambda x, y: x * y / gcd(x, y), args)", "def division_algorithm(n):\n assert n < 1000\n decimals = []\n dividend = 1\n divisor = n\n counter = 0\n repeating, repeating_length = False, 0\n while dividend != 0 and not repeating:\n dividend = dividend * 10\n decimals.append(dividend // divisor)\n dividend = dividend % divisor\n counter += 1\n repeating, repeating_length = is_repeating(decimals)\n if 
repeating:\n counter = repeating_length\n return repeating, counter", "def div(self):\n a = self.nums()\n x = LibraryFunctions.per(a, 0.9) - LibraryFunctions.per(a, 0.1)\n return x / 2.58", "def test_floordiv():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value // 2\n num_a.value //= 2\n assert num_a.value == new_value", "def divide(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n\n try:\n quotient = str(ft.reduce(oper.truediv,values))\n except ZeroDivisionError:\n quotient = \"You can't divide by zero! Everyone knows that!\"\n\n return quotient", "def lcm(a, b):\r\n return a * b // gcd(a, b)", "def division(x, y):\n return x / y", "def div(a, x):\n return [a[i]/x for i in range(2)]", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def Div(a, b):\n\tRequire(b > 0)\n\tc = a / b\n\treturn c", "def div_sum(data: list) -> int:\n\n def even_quotient(nums: list) -> int:\n \"\"\"Finds the quotient of the only two numbers in the list that evennly divide.\"\"\"\n for i in range(len(nums[:-1])):\n for j in range(i + 1, len(nums)):\n if nums[i] % nums[j] == 0:\n return nums[i] // nums[j]\n elif nums[j] % nums[i] == 0:\n return nums[j] // nums[i]\n\n total = 0\n for row in data:\n total += even_quotient(row)\n return total", "def __long__( self ):\r\n\t\treturnvalue = self.numerator / self.denominator\r\n\t\tif ( type( returnvalue ) == types.ComplexType ):\r\n\t\t\treturnvalue = long( abs( returnvalue ) )\r\n\t\telse:\r\n\t\t\treturnvalue = long( returnvalue )\r\n\t\treturn returnvalue", "def calculate_lcm(a, b):\n return a * b / calculate_gcd(a, b)", "def calculate_cruft(dividend, divisor):\n if divisor:\n result = float(len(dividend)) / len(divisor)\n else:\n result = 0.0\n return result", "def ceil_division(left_number, right_number):\n\t\t\treturn -(-left_number // right_number)", "def make_division_by(n):\n assert type(n) in [int, float], \"n must be a number\"\n assert n != 0, \"division by zero is not allowed\"\n\n def divider(x):\n assert type(x) in [int, float], \"x must be a number\"\n return x / n\n\n return divider", "def test_list_int(self):\n result = div(2, 4)\n self.assertEqual(result, 0.5)", "def convert_ints_to_floats(in_ints, divider):\n return (in_ints.astype(numpy.float64) / divider)", "def quotient(left_object, right_object):\n return int(float(left_object)/right_object)", "def greatest_common_divisor(x: int, y: int) -> int:\n while y != 0:\n (x, y) = (y, x % y)\n return x", "def get_divide_ab(a, b): # IN= 2'int' / OUT= 1'foat'\n return float(a/b)", "def div(a, b):\r\n if type(b) in inttypes_set:\r\n if not b:\r\n return Infinity(a)\r\n raise ZeroDivisionError('%r / %r' % (a, b))\r\n if b == 1:\r\n return a\r\n if type(a) in inttypes_set:\r\n return normalized_fraction(a, b)\r\n return a / b", "def lcm(a, b):\n\n\treturn (a * b)/gcd(a, b)", "def divisor (a,b):\n return a/b #i output a value by using the return statement", "def lcm(x, y):\n return x*y//gcd(x,y)", "def make_division_by(n):\n\n def division(x):\n assert type(x) == int, \"You can only use integers\"\n return x / 
n\n\n return division", "def lcms(argg: range) -> int:\n l = 1\n for arg in argg:\n l = lcm(l, arg)\n return l", "def num_permut(x) -> tuple:\n h = x // 100\n d = (x % 100) // 10\n n = x % 10\n return(n * 100 + d * 10 + h)", "def lcm(*numbers): \n def lcm(a, b):\n return (a * b) // gcd(a, b)\n return reduce(lcm, numbers, 1)", "def divide(a, b):\n return a / b", "def divide(a, b, floor=True):\n try:\n if floor:\n return a // b\n else:\n return a / b\n except TypeError:\n raise TypeError('unsupported operand type, use numbers of type int or float')", "def lcm(x: int, y: int) -> int:\n assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0\n return int(x * y / gcd(x, y))", "def divide(x, y):\n\n return x / y", "def as_integer_ratio(self, a, **args):\n v, n = math.frexp(a) # XXX: hack, will work only for floats\n\n for i in xrange(300):\n if v != math.floor(v):\n v, n = 2*v, n-1\n else:\n break\n\n numer, denom = int(v), 1\n\n m = 1 << abs(n)\n\n if n > 0:\n numer *= m\n else:\n denom = m\n\n n, d = self.limit_denom(numer, denom, **args)\n\n if a and not n:\n return numer, denom\n else:\n return n, d", "def the_division_is_aimed(numb1, numb2):\r\n return f\"Your result: {numb1//numb2}\"", "def div(a, b):\n c = Calculator()\n result = c.div(a, b)\n click.echo('{} / {} = {}'.format(a, b, result))", "def lcm(a, b):\n\n if a == b == 0:\n return 0\n\n return (a * b) // gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def div(a, b):\n\n c = 0\n d = b\n while True:\n if d > a:\n break\n else:\n c = c + 1\n d = d + b\n\n return c", "def rat2cont_quot(x, y):\n\tcont = []\n\twhile y != 0:\n\t\tcont.append(x // y)\n\t\tx, y = y, x % y\n\treturn cont" ]
[ "0.65800995", "0.6371114", "0.63462335", "0.6256875", "0.6244074", "0.62214583", "0.6192333", "0.61851525", "0.6185146", "0.61841047", "0.6174636", "0.6085751", "0.6082528", "0.603412", "0.6029013", "0.6028425", "0.6017244", "0.6011632", "0.60035694", "0.59994143", "0.5984466", "0.598289", "0.59781003", "0.596462", "0.5962144", "0.5948469", "0.5946427", "0.5941916", "0.5941576", "0.59414226", "0.5924082", "0.59166807", "0.59166807", "0.5903113", "0.5898446", "0.5869801", "0.5855534", "0.5846138", "0.5846138", "0.5844257", "0.58358496", "0.5832006", "0.5829564", "0.581562", "0.57990974", "0.57924926", "0.57898915", "0.5789467", "0.5789467", "0.57649386", "0.5751202", "0.57492244", "0.5735511", "0.5732463", "0.5726606", "0.57237", "0.57200646", "0.57180977", "0.5689395", "0.5689395", "0.5689395", "0.5689395", "0.5689395", "0.5689395", "0.5689395", "0.5689395", "0.5689395", "0.56820166", "0.56794846", "0.5668078", "0.56657964", "0.56604093", "0.5646307", "0.5626088", "0.5620988", "0.5618631", "0.5614688", "0.5611547", "0.5608605", "0.5601323", "0.5587473", "0.5586192", "0.55823725", "0.55811524", "0.55797106", "0.5577907", "0.5571456", "0.5559014", "0.55528945", "0.5543364", "0.55420554", "0.5538859", "0.5533672", "0.5532946", "0.55263877", "0.55246043", "0.55246043", "0.55246043", "0.55174017", "0.55167794" ]
0.6508378
1
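As an aside for readers of this dump: the long_division document above is runnable as-is, and a minimal usage sketch may help when skimming, since the cycle-detection behavior is easy to misread. The calls below are illustrative assumptions, not part of the dataset; they assume the function is in scope exactly as defined.

# Hypothetical usage of the long_division document above (not dataset content).
natural, decimal, recurring = long_division((10, 8))
# 10/8 = 1.25 terminates: returns ('1', '25', None)

natural, decimal, recurring = long_division((1, 7), decimal_limit=10)
# 1/7 = 0.(142857): the remainder repeats, so the cycle is detected and the
# function returns ('', '', '142857') -- natural and non-recurring parts empty.
# Note: the default decimal_limit=5 is too small for this 6-digit cycle, so the
# default call would stop early and return ('', '14285', None) instead.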
Get the length of a number in digits.
def get_number_length(number): return len(str(number))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ndigits(n):\n return len(str(abs(n)))", "def count_digits(n):\n return len(str(n))", "def _number_of_digits(number: int) -> int:\n return int(log10(number)) + 1", "def get_int_width(integer):\n return len(str(integer))", "def get_length(x):\n\n try:\n return int(x)\n except Exception:\n return len(x)", "def getLength(self):\n return self.n", "def count_digits(n):\n count = 0\n n=abs(n)\n while n!=0:\n count += 1\n n = n // 10\n return count", "def count_digits(num):\n total = 0\n while num is not 0:\n total += num % 10\n num //= 10\n return total", "def get_length(val):\n if isinstance(val, str):\n return len(val)\n if isinstance(val, int):\n return len('%8s' % val)\n if isinstance(val, float):\n return len('%15.4f' % val)\n if isinstance(val, bool):\n return 5", "def get_length(self):\n\n return self._length", "def digit_count(x):\n return int(math.floor(math.log10(x)) + 1)", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def get_length(self):\n\n return self.length", "def length(self) -> 'int':\n return self._info.len", "def _get_length(self):\n return self._length", "def length(n):\n a, b, c = n / 100, (n % 100) / 10 * 10, n % 10\n total = 0\n if a:\n total += num_to_len[a] + len('hundred')\n if b or c:\n total += len('and')\n if b == 10:\n total += num_to_len[b+c]\n else :\n if b > 1:\n total += num_to_len[b]\n if c:\n total += num_to_len[c]\n return total", "def getLength(self) -> float:\n return self.length", "def length(self):\n return self.n * self.t.length()", "def _get_length(self):\n from math import sqrt\n\n if self._length is None:\n sum1 = 0\n for a in self.diff:\n sum1 += a * a\n self._length = sqrt(sum1)\n return self._length", "def length(self):\n\t\treturn self.n", "def nr_digits_number(armstrong_candidate: int) -> int:\n number_of_digits = 0\n while armstrong_candidate != 0:\n armstrong_candidate = int(armstrong_candidate / 10)\n number_of_digits += 1\n\n return number_of_digits", "def length(value):\n\n # Try to return the length\n return len(value)", "def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))", "def length(self):\n return self._info.length # pylint: disable=E1101", "def total_length(self):\n return abs(self.length)", "def num_digits(num):\r\n if num == 0:\r\n return 1\r\n return int(log10(num)+1)", "def bitlen(number):\n assert(isinstance(number, int))\n if number == 0:\n return 1\n else:\n return floor(log2(number)) + 1", "def getLength(self):\n return self.length", "def length(self) -> ir.IntegerValue:\n return ops.MapLength(self).to_expr()", "def _binary_digits(num):\n ln = math.log(abs(num), 2) if num != 0 else 1\n ndigits = math.ceil(ln)\n if int(ln) == ln:\n ndigits += 1\n\n should_pad = ((ndigits % 8) != 0)\n if should_pad:\n ndigits = (int(ndigits / 8) * 8) + 8\n\n return ndigits", "def lenOfRec(denom):\n d = denom\n n = 1 #numerator\n count = 0\n while n!=0:\n n = n*10 % d\n count += 1\n if n == 1:\n break\n \n return count", "def length(self: GtinFormat) -> int:\n return int(self)", "def getLength(self):\n return self.count", "def get_base_length(self):\n str_len_input = self.entries[\"ent_base_length\"].get()\n if str_len_input in ['', '.', '+.', '-.', '+', '-']:\n return 10.0 # default base length\n return float(str_len_input)", "def getLength(self):\n flength = 0\n for quad in self._quadrilaterals:\n flength = flength + get_quad_length(quad)\n return flength", "def get_length(self):\n if(type(self._length) != float):\n self._logger.write(\"Error! 
length must be of type float\")\n elif(self._length == None):\n self._logger.write(\"Error! length contains no value\")\n else:\n try:\n return self._length\n except Exception as e:\n self._logger.write(\"Error! Could not fetch the value of length: \\n %s\" % e)", "def get_n_digit(num):\n cnt = 0\n while num & 1 != 1:\n num >>= 1\n cnt += 1\n # print(cnt)\n return cnt", "def get_street_length(street):\r\n return len(street)", "def sum_of_digits_in_number(n: int) -> int:\n return sum(int(digit) for digit in str(n))", "def getLength(self):\n return self.geometry.length", "def total_length(self):\n return self.length", "def get_length(dna):\n return len(dna)", "def getLen(self):\n return self.len", "def length(self):\n return self._length", "def length(self):\n return self._length", "def length(self) -> int:\r\n\r\n return self.__length", "def get_length(dna):\n return len (dna)", "def ndigits(x):\n if type(x) != int: # basic error handling of non-int\n print('The value of x is not an integer')\n return None\n if abs(x) < 10:\n return 1 # single digit case\n x_recursive = int((x - x % 10)/10) # remove last digit using modulo\n return 1 + ndigits(x_recursive)", "def timestamp_length(self) -> int:\n timestamps = self.timestamps_sorted_list()\n base_length = computation.num_digits(timestamps[0]) if len(timestamps) > 0 else -1\n indexes = [1, 2, 3, 4, 5, -1, -2, -3, -4] if len(timestamps) > 10 else list(range(1, len(timestamps)))\n for n in indexes:\n length = computation.num_digits(timestamps[n])\n if length != base_length:\n return -1\n return base_length", "def total_length():\n return", "def countDigits(n):\n digits = [0]*10\n while n > 0:\n digits[n%10] += 1\n n = n//10\n return digits", "def getLength(self):\n stop = 0\n if type(self.stop) is SharedCounter:\n stop = self.stop.getVal()\n else:\n stop = self.stop\n return stop - self.start", "def size_as_number_of_bits(size):\n\n if size == 0:\n return 0\n else:\n return len('{:b}'.format(size))", "def length(self):\n return self.length", "def get_length(self):\n length = 0\n for card in self.decklist:\n length += card.amount\n return length", "def length(self):\n\n return self._length", "def lastTen(self, num, length):\n\t\tif (length-num <=10):\n\t\t\treturn 1\n\t\treturn 0", "def getTotalLength(self):\n return self.length", "def _count_zero(number):\n zero_count = 0\n while number > 9:\n if number % 10 == 0:\n zero_count += 1\n number /= 10\n else:\n break\n return zero_count", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def value_length(self):\n return self._length", "def digits(n, base=10):\n if n == 0:\n return 1\n\n n = abs(n)\n if base != 10:\n digits = math.log(n, base)\n else:\n digits = math.log10(n)\n return int(digits) + 1", "def get_length(self):\n return self.run_command('get_length')[0]", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def len(x) -> int:\n pass", "def calculate_length(self):\n raise NotImplementedError", "def len2(x):\n \n if hasattr(x, '__len__'):\n \n length = len(x)\n \n elif isinstance(x, (int,float,long,complex)):\n \n length = 1\n \n return length", "def getLength(self):\n return _libsbml.XMLAttributes_getLength(self)", "def lena(self) -> int:\n return self._core.lena()", "def length(self) -> int:\n pass", "def length(self):\n return self.length2 ** 0.5", "def sum_digits(n):\n \"*** YOUR CODE HERE ***\"\n count=0\n length=len(str(n))\n last=0\n sum=0\n while count<length:\n last=n%10\n n//=10\n sum+=last\n count+=1\n return 
sum", "def decimal_to_digits(decimal, min_digits=None):\n digits = abs(int(np.log10(decimal)))\n if min_digits is not None:\n digits = np.clip(digits, min_digits, 20)\n return digits", "def calculate_digits_sum(number: int) -> int:\n\n # Return value\n ret = 0\n\n while number != 0:\n # Extract the last digit number and add it to ret\n ret += number % 10\n\n # Delete the last digit of the number\n number //= 10\n\n return ret", "def Length(self) -> int:", "def Length(self) -> int:", "def sum_of_digits(n):\n return sum(int(c) for c in str(n))", "def total_nt(self) -> int:\n return self.sequence.length", "def get_unstr_length(self):\n if self.unstr_length is None:\n return self.length()\n\n elif isinstance(self.unstr_length, str):\n return self.length() + float(self.unstr_length)\n\n return self.unstr_length", "def get_total_gti_length(gti, minlen=0):\n lengths = get_gti_lengths(gti)\n return np.sum(lengths[lengths >= minlen])", "def pixels_to_length(pixels, boxsize, num_pixels):\n length = (boxsize * pixels) / num_pixels\n\n return length", "def len_of_size(self, size):\n return self.size_freqs[size]", "def length(self) -> ir.FloatingValue:\n return ops.GeoLength(self).to_expr()", "def reallength(value):\n try:\n value.count()\n except:\n return len(value)", "def get_length(self):\n return self._select_interface(self._rc_get_length,\n self._http_get_length)", "def get_sum_of_digits(number):\n return sum(int(digit) for digit in str(number))", "def __len__(self):\r\n return numBits(self.n)", "def GetSequenceLength(num_nodes: int) -> int:\n return num_nodes * (3 + (num_nodes - 1) * 2)", "def get_length(self):\n return self.resource.get_size()", "def last_n_digits(num, n):\n return num%(10**n)", "def sequence_length(self):\n return self.get_sequence_length()", "def Length (v) :\n if v in (0, \"0\") :\n result = Px (0)\n elif isinstance (v, pyk.string_types) :\n pat = _Length_.Pat\n v = v.strip ()\n if v in _length_keywords :\n result = v\n elif pat.match (v) :\n T = _Length_._Unit_Map [pat.unit.lower ()]\n n = pat.number\n result = T (float (n) if (\".\" in n) else int (n, 10))\n else :\n raise ValueError (v)\n elif isinstance (v, _Length_) :\n result = v\n else :\n raise ValueError (v)\n return result", "def length(self) -> int:\n return self.size", "def length(self) -> int:\n return self.size", "def length(self):\n return pyvista.Box(self.bounds).length", "def len_score(n):\n return len(n)", "def length(self):\n return Integer(len(self._g))", "def char_size(self):\n return len(self.id2char)" ]
[ "0.7974036", "0.78436166", "0.75876623", "0.72771937", "0.68577904", "0.68538064", "0.677399", "0.6735333", "0.6720063", "0.6719415", "0.6715743", "0.6707653", "0.6707653", "0.6627682", "0.6612727", "0.65931964", "0.6514543", "0.64946806", "0.6464933", "0.6453461", "0.6445275", "0.6431447", "0.6412844", "0.6403521", "0.6393142", "0.6386502", "0.63793707", "0.63760835", "0.6364863", "0.6319634", "0.6275811", "0.62678725", "0.6261122", "0.62517065", "0.6236843", "0.62318987", "0.62052083", "0.6198943", "0.61960506", "0.6193818", "0.618858", "0.6175716", "0.61711586", "0.6169915", "0.61678326", "0.61678326", "0.6158626", "0.61493635", "0.6144819", "0.61360556", "0.61308753", "0.61293787", "0.6128995", "0.6122491", "0.61185706", "0.6103301", "0.6100808", "0.61000097", "0.6097051", "0.6093681", "0.6086458", "0.6086458", "0.6084414", "0.6074413", "0.6069379", "0.6051864", "0.6047926", "0.60133535", "0.6011092", "0.60069036", "0.6004858", "0.6004667", "0.5981054", "0.59599286", "0.5943674", "0.5940292", "0.593628", "0.593628", "0.5935667", "0.59355766", "0.5935186", "0.5922662", "0.59010243", "0.58842474", "0.58836484", "0.58805496", "0.5879161", "0.58717936", "0.58581406", "0.58541524", "0.5853617", "0.5845857", "0.58441114", "0.5838033", "0.5831284", "0.5831284", "0.5830885", "0.58248377", "0.5822974", "0.58222264" ]
0.8823181
0
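Similarly, a brief hedged check of the get_number_length document above; the inputs are illustrative and the caveat is an editorial observation, not a dataset claim: len(str(number)) counts the minus sign on negative inputs.

# Hypothetical usage of the get_number_length document above (not dataset content).
print(get_number_length(0))      # 1
print(get_number_length(12345))  # 5
print(get_number_length(-42))    # 3, because len("-42") includes the sign
# If only digits should count (an assumed requirement), take abs() first:
print(get_number_length(abs(-42)))  # 2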
Returns an array of all the possible sums between list elements.
def cross_sum_elements_of_list(list_of_int): array_of_int = np.array(list_of_int).reshape((len(list_of_int), 1)) transposed_array = array_of_int.copy().T sum_of_elements_array = array_of_int + transposed_array return np.unique(sum_of_elements_array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum(lists) -> list:\r\n return list(np.sum(lists, 0))", "def sum_elements(arr):\n return sum(arr)", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_list(input_list: List[float]) -> float:\n sum: float = 0\n for i in input_list:\n sum = sum + i\n return sum", "def sumValues(aList):\r\n sum = 0\r\n for d in aList:\r\n sum += d\r\n return sum", "def sum_list(numbers):\n\t\n\tif len(numbers) == 0:\n\t\treturn 0 \n\n\tsum = numbers[0] +sum_list(numbers[1:])\n\treturn sum", "def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result", "def ll_sum(x):\n xlist = []\n for i in x:\n for num in i:\n xlist.append(num)\n return sum(xlist)", "def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum", "def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total", "def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)", "def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)", "def sum(lst):\n total = 0\n for i in lst:\n total += i\n return total", "def list_sum(lst):\n total = 0\n for ele in range(0, len(lst)):\n total = total + lst[ele]\n return total", "def sum_list_elements(input_list):\n print(f\"Sumatorio de los elementos dela lista: {sum(input_list)}\")", "def total(num_list):\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n return num_sum", "def sum_all_element(my_list):\n result = 0\n for i in range(len(my_list)):\n result = result + int(my_list[i])\n return result", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def sum_list(num_list):\n # return sum(num_list)\n sum_list = 0\n for number in num_list:\n sum_list += number\n print(sum_list)\n \n # code prints out the sum_list for each value, increasing by the value each time\n # final output is the sum of numbers\n # currently no output for '[]' as input ", "def sum(inputList):\n sum=0#the sum of the list starts from 0\n for num in inputList:\n sum=sum+num#add all number in the list\n print(\"the sum is\",sum)", "def cumsum(ls):\n\t\n\tacc = 0\n\tr = [0 for v in ls]\n\tfor i,v in enumerate(ls):\n\t\tacc += v\n\t\tr[i] = acc\n\treturn r", "def sum1d_pos(summand):\n total = 0\n for i in range(summand.size):\n total += summand[i]\n return total", "def sum_items(numbers):\n total = 0\n for item in numbers:\n total += item\n return total", "def cumulative_sum(x):\n total = 0\n sum = []\n for num in x:\n total += num\n sum.append(total)\n return sum", "def postitionalSum(list1, list2):\r\n\r\n myList = []\r\n\r\n if list1 == []:\r\n return []\r\n else:\r\n myList.append(list1[0]+list2[0])\r\n list1.remove(list1[0]), list2.remove(list2[0])\r\n postitionalSum(list1, list2)\r\n return myList", "def sum_list(list_obj):\r\n sum = 0\r\n for num in list_obj:\r\n sum += num\r\n return sum", "def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:\n return sum(mxd_lst)", "def cumulative_sum(array):\n res = []\n val = 0\n for elem in array:\n val += elem\n res.append(val)\n return res", "def ll_sum(some_list):\n #This function will return total value of all integers combinded.\n result = 0\n if type(some_list) == list: #Check the element is list or not?\n for i in range(len(some_list)):\n 
result += ll_sum(some_list[i]) # if it's a list call this function \n #so it will call over and over untill it found element that not a list.\n elif type(some_list) == float or type(some_list) == int: #if it's not list return it value.\n result += some_list\n return result", "def lsum (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s", "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def sum_items(a_list):\r\n assert len(a_list) >= 0, \"Length of List cannot be negative\"\r\n sum = 0\r\n if len(a_list) == 0:\r\n return sum\r\n else:\r\n for i in range(len(a_list)):\r\n sum += a_list[i]\r\n return sum", "def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result", "def sum_values(values):\n return (sum(values))", "def cumulative_sum(self, lis):\n new_list = []\n for i in range(len(lis)):\n if i == 0:\n new_list.append(lis[i])\n else:\n new_list.append(new_list[i-1] + lis[i])\n return new_list", "def running_sum(nums_li: List[int]) -> List[int]:\n for i in range(1, len(nums_li)):\n nums_li[i] += nums_li[i - 1]\n return nums_li", "def find_sum( *my_list):\n # a = len(my_list)- 2\n # i = 0\n # suma=0\n # for i in my_list :\n # suma += my_list[i]\n # i+=1\n # return suma\n return sum(my_list)", "def sum_array(arr):\n sum = 0\n for num in arr:\n sum += num\n return sum", "def sum_fuel(fuel_list):\n return functools.reduce(lambda a, b: a+b, fuel_list)", "def add_list_numbers(incoming_list):\n # summation=0\n if incoming_list:\n summation = sum(incoming_list)\n else:\n summation = 0\n return summation", "def sum_unique(l):\n pass", "def mult_and_sum(*arg_list):\r\n result = numpy.empty(arg_list[0].shape, dtype=numpy.float32)\r\n result[:] = nodata\r\n array_stack = numpy.array(arg_list[0::2])\r\n scalar_list = numpy.array(arg_list[1::2])\r\n # make a valid mask as big as a single array\r\n valid_mask = numpy.logical_and.reduce(\r\n array_stack != pop_nodata, axis=0)\r\n\r\n # mask out all invalid elements but reshape so there's still the same\r\n # number of arrays\r\n valid_array_elements = (\r\n array_stack[numpy.broadcast_to(valid_mask, array_stack.shape)])\r\n array_stack = None\r\n\r\n # sometimes this array is empty, check first before reshaping\r\n if valid_array_elements.size != 0:\r\n valid_array_elements = valid_array_elements.reshape(\r\n -1, numpy.count_nonzero(valid_mask))\r\n # multiply each element of the scalar with each row of the valid\r\n # array stack, then sum along the 0 axis to get the result\r\n result[valid_mask] = numpy.sum(\r\n (valid_array_elements.T * scalar_list).T, axis=0)\r\n scalar_list = None\r\n valid_mask = None\r\n valid_array_elements = None\r\n return result", "def sum_numbers(numbers):\n sum = 0\n for number in numbers:\n sum += number\n\n return sum", "def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results", "def add_list_numbers(incoming_list: list):\n return sum(incoming_list)", "def cumulative_sum(some_list):\n #This function will return new list that every element is the sum of the element before.\n for i in range(len(some_list)):\n new_list = some_list\n if i > 0: #if it's not the first element in list so it can be sum by element before.\n new_list[i] += new_list[i-1]\n else:\n pass #don't do anything with the first element of the list.\n return new_list", "def rowSum(mtx):\n try:\n for i in range(0, len(mtx)):\n assert len(mtx[i]) == len(mtx[i-1]) # check whether each list has the same length.\n \n res = list()\n for j in range(0, len(mtx[0])): \n tmp = 0\n for i in range(0, len(mtx)): \n tmp = tmp + mtx[i][j]\n res.append(tmp)\n return(res)\n \n except AssertionError as detail:\n return ('Length of lists is irregular or input format is wrong.')\n except TypeError as detail:\n return ('Undefined operand type')", "def sum_for_list(lst):\n list_of_nods = []\n for num in lst:\n temp_list = simple_nod(abs(num))\n for item in temp_list:\n if item not in list_of_nods:\n list_of_nods.append(item)\n result = []\n for nod in list_of_nods:\n flag = False\n sum = 0\n for num in lst:\n if not num % nod:\n sum += num\n flag = True\n if flag:\n result.append([nod, sum])\n return sorted(result, key=lambda x: x[0])", "def summation(self):\n return sum(self.read_ints())", "def sum_node_list(node_list):\n node_list = [n for n in node_list if n is not None]\n if node_list == []:\n return None\n\n from operator import add\n from functools import reduce\n return reduce(add, node_list)", "def sum(self):\n return self.aggregate(np.sum)", "def cumsum(L):\n for i in range(1, len(L)):\n L[i] += L[i-1]\n return L", "def __call__(self, *array_list):\n valid_mask = numpy.zeros(array_list[0].shape, dtype=bool)\n result = numpy.empty_like(array_list[0])\n result[:] = 0\n for array in array_list:\n local_valid_mask = array != _INDEX_NODATA\n result[local_valid_mask] += array[local_valid_mask]\n valid_mask |= local_valid_mask\n result[~valid_mask] = _INDEX_NODATA\n return result", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def sum_values(self):\n raise NotImplementedError", "def add_list_numbers(incoming_list):\n if incoming_list:\n retval = sum(incoming_list)\n else:\n retval = 0\n return retval", "def task_8_sum_of_ints(data: List[int]) -> int:\n return sum(data)", "def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result", "def total2d(arr: List[List[int]]) -> int: # _8 [✅]\n # ** try to solve this in one line using a list comprehension\n return sum( [sum(sub_arr) for sub_arr in arr ] )", "def summed(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield sum(v)", "def sum_numbers(sequence):\r\n\r\n total = 0\r\n seq = get_numbers(sequence)\r\n for element in seq:\r\n total += element\r\n\r\n return total", "def add_lists(*listeler):\n\n\n print map (sum, zip(*listeler))", "def updateSumList(abList, sList):\n for element in abList:\n # the only difference is the latest added element\n sList.add(element + abList[-1])\n return sList", "def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)", "def timeseries_list_sum(data, series_list, field_spec):\n return TimeSeries.timeseries_list_reduce(data, series_list, Event.sum, field_spec)", "def total(h):\r\n\treturn sum(i.points() for i in h)", "def checksumoflist(l):\n x = 0\n for _ in range(len(l)):\n x += int(l[_])\n return x", "def fsum(iterable):\n return 0.0", "def suma_parejas(pares):\n a, b = 0, 0\n for x, y in pares:\n a, b = a + x, b + y\n return a, b", "def sum(self):\n return sum(self.values)", "def get_sum_zero_pairs(numbers):\n numbers = set(numbers)\n numbers = list(numbers)\n pairs_that_add_to_zero = []\n\n for i, item in enumerate(numbers):\n if numbers[i] == len(numbers):\n break\n\n if numbers[i] == 0:\n pairs_that_add_to_zero.append([0, 0]) \n\n for j in range(i+1, len(numbers)):\n total_of_two_items = numbers[i] + numbers[j]\n if (total_of_two_items == 0):\n pairs_that_add_to_zero.append([numbers[i], numbers[j]]) \n\n return pairs_that_add_to_zero", "def add_list_numbers(incoming_list):\n if incoming_list: #if incoming_list is not None and len(incoming_list) > 0\n return_value = sum(incoming_list)\n else:\n return_value = 0\n return return_value", "def sum_list(lst):\n\n if lst == []:\n return 0\n else:\n return lst[0] + sum_list(lst[1:])", "def pair_sum(list1, x):\n\n for y in list1:\n for m in list1:\n if y+m==x:\n difference=abs(y-m)\n return difference", "def sum(self, values):\n return self.aggregate(values, \"sum\")", "def lcumsum (inlist):\r\n newlist = copy.deepcopy(inlist)\r\n for i in range(1,len(newlist)):\r\n newlist[i] = newlist[i] + newlist[i-1]\r\n return newlist", "def mono_sum(l):\n r = next(l)\n for m in l: r = r + m\n return r", "def test_suite():\n test(sum_all_elements([1,3,1,4,3,8]) == 5)\n test(sum_all_elements([1,3,5,7]) == 16)\n test(sum_all_elements([1, -7, 10, 23]) == -6)\n test(sum_all_elements(range(1,555,2)) == 76729)", "def running_total(date_list):\n return sum(d.price for d in date_list)", "def sum_value(self, lv, rv):", "def subarraySum(self, nums):\n if not nums:\n return []\n\n sum_to_index = {}\n sum_to_index[0] = -1\n\n prefix_sum = 0\n for i in range(len(nums)):\n prefix_sum += nums[i]\n\n if prefix_sum in sum_to_index:\n return [\n sum_to_index[prefix_sum] + 1,\n i\n ]\n\n sum_to_index[prefix_sum] = i\n\n return []", "def sumList(list1,list2):\n \n num1 = 0\n pos1 = 1\n pointer1 = list1.head\n while pointer1 != None:\n num1 += pointer1.data * pos1\n pos1 *= 10\n pointer1 = pointer1.next\n \n num2 = 0\n pos2 = 1\n pointer2 = list2.head\n while pointer2 != None:\n num2 += pointer2.data * pos2\n pos2 *= 10\n pointer2 = pointer2.next\n \n res = num2 + num1\n \n resList = LinkedList()\n \n \n while res != 0:\n resList.append(res%10)\n res = res//10\n \n return resList", "def _cartesian_add(xs):\n return sum(prefer_static.reshape(x, shape=[-1] + [1]*(len(xs) - 1 - i))\n for i, x in enumerate(xs))", "def add(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],1.0]", "def sum_squared(variable_list):\n return sum([el * el for el in variable_list])", "def no_teen_sum(a,b,c):\n list = []\n list.append(a)\n list.append(b)\n list.append(c)\n\n myTeen = fix_teen(list)\n sum = 0\n x = 0\n\n for x in myTeen:\n sum = sum + x\n\n return sum", "def sum(iterable: typing.Iterable, start: float = 0) -> float:\n return sum(iterable, start)", "def calc_e_final(all_et_lst, size_of_batch):\r\n e_final_lst = []\r\n\r\n for i in range(len(all_et_lst[0])): # For each index of the Etotal list\r\n et_sum = 0 # Sum of ETotal values with same index\r\n for lst in all_et_lst: # For each Etotal list\r\n et_sum += lst[i]\r\n\r\n e_final = (1/size_of_batch) * et_sum\r\n e_final_lst.append(e_final)\r\n\r\n return e_final_lst", "def find_sums(lst, m):\n sum = 0\n for i in range(0,len(lst)):\n if m > int(lst[i]):\n sum1 = abs(m - int(lst[i]))\n elif m < int(lst[i]):\n sum1 = abs(int(lst[i]) - m)\n else:\n pass\n sum += sum1\n return sum", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [sum(element_wise) for element_wise in zip(arr1, arr2)]", "def sum_by_elem(p,q):\n p, num1 = p\n q, num2 = q\n tup = map(sum,zip(p,q))\n return (tuple(tup),num1+num2)", "def _add_list_values(a, b):\n new_list = []\n for i in range(len(a)):\n new_list.append(a[i] + b[i])\n return new_list", "def recurrent_sum_of_elements_in_list(lst):\n if len(lst) == 0:\n return 0\n elif len(lst) == 1:\n return lst[0]\n return lst[0] + recurrent_sum_of_elements_in_list(lst[1:])", "def cumsum(lst):\n for i in range(1,len(lst)):\n lst[i] = lst[i- 1 ] + lst[i]\n return lst", "def vector_sum(vectors: List[Vector]) -> Vector:\n assert vectors, 'no vectors provided'\n\n num_elements = len(vectors[0])\n assert all(\n len(v) == num_elements for v in vectors), 'vectors must be the same length'\n\n return [sum(vec[i] for vec in vectors) for i in range(num_elements)]", "def possible_sums(numbers: Iterator[SnailfishNumber]) -> Iterator[SnailfishNumber]:\n yield from (a + b for a, b in permutations(numbers, 2))", "def test_sum_list_int(self):\n\n list_of_int = [1, 2, 3]\n result = sum(list_of_int)\n\n self.assertEqual(result, 6)", "def sumDivisor(inputList):\n result = 0\n for i in inputList:\n result += i\n return result", "def add4(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]" ]
[ "0.7886432", "0.72365195", "0.7159521", "0.7159521", "0.71008116", "0.7051906", "0.70465595", "0.7040757", "0.67752486", "0.6750484", "0.6748924", "0.6736692", "0.6732239", "0.66944194", "0.6687826", "0.667164", "0.66535944", "0.6639427", "0.66387355", "0.6596252", "0.6593165", "0.6591942", "0.6528959", "0.6508944", "0.6474637", "0.6470664", "0.64497215", "0.642719", "0.6407067", "0.6383321", "0.63731784", "0.63604164", "0.6344631", "0.63255", "0.62655187", "0.62642163", "0.6257052", "0.6241296", "0.6240392", "0.6227075", "0.62015915", "0.6158756", "0.6155163", "0.61412716", "0.61252904", "0.6125198", "0.6116218", "0.6087926", "0.6080959", "0.6061127", "0.6058157", "0.605025", "0.60412854", "0.60371536", "0.60297793", "0.6012378", "0.60121626", "0.6008704", "0.60085446", "0.6000514", "0.59946775", "0.59887254", "0.5984764", "0.59667623", "0.59632295", "0.59241736", "0.59151644", "0.5902034", "0.5901857", "0.5899011", "0.58953905", "0.58901846", "0.58653486", "0.5861609", "0.58585274", "0.5857078", "0.5855219", "0.5851678", "0.58376473", "0.58323264", "0.5820615", "0.5811716", "0.5811102", "0.58105874", "0.58071023", "0.58018506", "0.57962507", "0.5792165", "0.5778746", "0.577646", "0.57763386", "0.5775623", "0.5774247", "0.5769289", "0.57670844", "0.5765805", "0.57645655", "0.5760864", "0.5759791", "0.5759498" ]
0.71858424
2
Return list of all the divisors of an integer num.
def get_divisors(num): assert num != 0, "Num is 0" divisors = [] sq_root = int(num**0.5) for i in range(1, sq_root + 1): if num % i == 0: divisors.extend([i, num // i]) # if num has a perfect sq, that number will be added twice, then: if sq_root ** 2 == num: divisors.remove(sq_root) return divisors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simple_get_divisors(num: int) -> list:\n all_divisors = []\n for possible_divisor in range(1, math.floor(num / 2) + 1):\n if num % possible_divisor == 0:\n all_divisors.append(possible_divisor)\n return all_divisors", "def get_divisores(num):\n divisores = [] #uso una lista para guardar los divisores\n for i in range(1, num):\n if num%i == 0:\n divisores.append(i)\n return divisores", "def divisor_lister(num):\n if num <= 0:\n raise ValueError('num must be a positive, non-zero number')\n\n divisors = []\n for possible_divisor in range(2, num-1):\n if num % possible_divisor == 0:\n divisors.append(possible_divisor)\n\n # 1 and num itself are divisors so throw them in there\n divisors.append(1)\n divisors.append(num)\n divisors.sort()\n return divisors", "def _find_dividers(num: int) -> List[int]:\r\n\r\n dividers: List[int] = list()\r\n while num != 1:\r\n primes = PrimeHandler.find_all_primes(num)\r\n for prime in reversed(primes):\r\n if num % prime == 0:\r\n dividers.append(prime)\r\n num = num // prime\r\n break\r\n return list(reversed(dividers))", "def divisors(num: int) -> Iterable[int]:\n assert num > 0\n if num == 1:\n yield 1\n return\n\n for divisor in range(1, int(math.sqrt(num)) + 1):\n if num % divisor == 0:\n yield divisor\n divisor_2 = num // divisor\n if divisor_2 != divisor:\n yield divisor_2\n else:\n return", "def get_dividers(num: int) -> List[int]:\r\n if num <= 0:\r\n raise ValueError\r\n\r\n if num == 1:\r\n return [1]\r\n\r\n if PrimeHandler.is_prime(num):\r\n return [num]\r\n\r\n return _find_dividers(num)", "def get_divisors_with_parity_check(num: int) -> list:\n all_divisors = []\n # if number is odd, increment by 2 because don't have to check evens\n increment = 2 if num % 2 == 1 else 1\n\n for possible_divisor in range(1, math.floor(num / 2) + 1, increment):\n if num % possible_divisor == 0:\n all_divisors.append(possible_divisor)\n return all_divisors", "def divisors(n: int) -> list:\n # iterate through every number <= n/2 and check whether the number is a divisor\n # append to list if not in list\n # in the end, append the number\n divs = [n]\n for i in range(1, n//2 + 1):\n if n % i == 0:\n divs.append(i)\n return divs", "def findDivisor(num):\n divisors = [1]\n for i in range(2, int(sqrt(num)) + 1):\n if num % i == 0:\n divisors.append(i)\n temp = num / i\n if temp != i:\n divisors.append(temp)\n return divisors", "def gatherDivisors(number): # prvni string ve funkci je comment; \"\"\" znamenam ze je na vic radek\n\tdivisors = []\n\tfor div in range(1, number + 1): # range vyhodi vse od jedne az do number\n\t\tif number % div == 0:\n\t\t\tdivisors.append(div)\n\treturn divisors", "def find_divisors_1(number):\n divisors = []\n # Test all numbers from 1 to number-1.\n # Actually, we can be more efficient with range(1, (number//2)+1)\n for n in range(1, number): \n if number % n == 0:\n divisors.append(n)\n return divisors", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def find_divisors_2(number):\n divisors = [n for n in range(1, number) if number % n == 0]\n return divisors", "def divisors(N):\n # Initialize the list of divisors\n divisor_list = [1]\n # Check division by d for d <= N/2\n for d in range(2,N // 2 + 1):\n if N % d == 0:\n divisor_list.append(d)\n divisor_list.append(N)\n return divisor_list", "def divisors(n):\n return [x for x in range(1, n) if n % x == 0]", "def getDivisors(n):", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def get_divisors(n):\n n = abs(n)\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n%i == 0:\n divisors.append(i)\n divisors.append(-i)\n if i*i != n:\n divisors.append(n//i)\n divisors.append(-n//i)\n return sorted(divisors, key=abs)", "def proper_divisors(n: int) -> [int]:\n\n if n == 1:\n return []\n\n x = 2\n divisors = set([1])\n while x * x <= n and n > 1:\n if n % x == 0:\n divisors.add(x)\n divisors.add(n // x)\n\n x += 1\n\n s = sorted(divisors)\n return s", "def findDivisors(num1, num2):\n divisors = (1,)\n for i in range(2, (min(num1, num2) + 1)):\n if num1 % i == 0 and num2 % i == 0:\n divisors += (i,)\n return divisors", "def find_divisors(n: int) -> Set[int]:\n divisors = {1, n}\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n // i)\n return divisors", "def divisors(n):\n dvs = []\n for i in range(1, int(math.sqrt(n)) + 1):\n if n % i == 0:\n dvs.append(i)\n j = n / i\n if j != i:\n dvs.append(j)\n\n dvs.remove(n)\n return dvs", "def list_of_divisors_v2(n):\n return list(divisorGen(n))", "def divisors(x):\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x//i)\n return sorted(distinct(result))", "def divisors(n):\n d = []\n for i in range(1, int(math.sqrt(n) + 1)):\n if n % i == 0:\n d.append(i)\n d.append(n / i)\n return set(d)", "def divisors(number: int) -> Set[int]:\n\n if number == 0:\n return {0}\n divisor = 2\n while divisor * divisor <= number:\n if number % divisor == 0:\n smaller_result = divisors(number // divisor)\n multiplied_result = {d * divisor for d in smaller_result}\n\n return smaller_result | multiplied_result\n divisor = divisor + 1\n\n return {1, number}", "def divisors(intgr):\n\tdivisors = []\n\tfor i in range(1,intgr+1):\n\t\tif(intgr%i==0):\n\t\t\tdivisors.append(i)\n\treturn divisors[1:-1]", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def num_divisors(n):\n divisors = []\n for i in range(1, int(n**0.5) + 1):\n if n % i == 0:\n divisors += {i, n //i}\n return divisors", "def get_divisors(n, includeN=True):\n lower_divisors, upper_divisors = [], []\n i = 1\n while i * i <= n:\n if n % i == 0:\n lower_divisors.append(i)\n if i != n // i:\n upper_divisors.append(n//i)\n i += 1\n upper_divisors = upper_divisors[::-1]\n if not includeN:\n upper_divisors.pop()\n return lower_divisors + upper_divisors", "def get_factors(num):\n factors = []\n\n # Extend range by 1 to include num\n for i in range(1, num+1):\n if num % i == 0:\n factors.append(i)\n return factors", "def find_divisors(n):\n\n\tpd = [1]\n\n\tsqrtN = int(math.sqrt(n))\n\n\tfor d in range(2, sqrtN+1):\n\t\tif n % d == 0:\n\t\t\tpd.append(d)\n\t\t\tpair = int(n/d)\n\t\t\tif not pair == d:\n\t\t\t\tpd.append(pair)\n\n\treturn pd", "def find_divisors(integer):\n\n divisors = []\n # we know that an integer divides itself\n divisors.append(integer)\n # we also know that the biggest divisor other than the integer itself\n # must be at most half the value of the integer (think about it)\n divisor = integer / 2\n\n while divisor >= 0:\n if is_divisible(integer, divisor):\n divisors.append(divisor)\n divisor =- 1\n\n return divisors", "def proper_divisors(n):\n l = [1]\n if n == 1 or n == 2:\n return l\n else:\n limit = math.floor(n/2) + 1\n for i in range(2, limit):\n if n % i == 0:\n l.append(i)\n return l", "def proper_divisors(number):\n factors = (divisors(number))\n factors.remove(number)\n return factors", "def divisors(n):\n divs = [1]\n for p, e in factorization(n):\n divs += [x*p**k for k in range(1,e+1) for x in divs]\n return divs", "def list_of_divisors_v1(n):\n \"\"\"\n This is a slow algorithm. But it is correct.\n \"\"\"\n if n == 1:\n return [1]\n if n == 2:\n return [1,2]\n L = {}\n if n > 0:\n L[1] = True\n if n > 1:\n L[n] = True\n for i in list_of_prime_factors(n):\n L[i] = True\n for j in list_of_divisors(n // i):\n L[j] = True\n return L.keys()", "def proper_divisors(n):\n divisors = set([1])\n for i in range(2, int(ceil(sqrt(n)))+1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n/i)\n return divisors", "def find_factors(num):\n factors = set()\n i = 1\n while i*i < num:\n if num % i == 0:\n factors.add(i)\n factors.add(int(num/i))\n i+=1\n factors = list(factors)\n factors.sort()\n return factors", "def restricted_divisors(x):\n return divisors(x)[1:-1]", "def divisors(n):\n return tuple(_divisor_gen(n))", "def proper_divisors(x):\n return divisors(x)[:-1]", "def d(n):\n divisors = []\n for i in range(1, n):\n if n % i == 0:\n divisors.append(i)\n return sum(divisors)", "def find_proper_divisors(n: int) -> Set[int]:\n\n divisors = find_divisors(n)\n return divisors - {n} # without n", "def Prime_div(num,Prime,divisors):\r\n if divisors == []: #if this is the first call add one to list all numbers div by 1\r\n divisors.append(1)\r\n \r\n if Prime.isPrime(num):#base case. when prime number is found we have found all Prime divisors\r\n temp = divisors \r\n divisors.append(num)\r\n \r\n \r\n return divisors\r\n else:# testing case. If we have not found our base prime, then we keep calling the function\r\n Prime.reset()\r\n factor = Prime.nextPrime()\r\n found = False#Set to false in each call \r\n while factor < num/2 and not found:\r\n if num%factor == 0:\r\n divisors.append(factor)#Appends a found prime divisor to our list\r\n found = True\r\n if not found:\r\n factor = Prime.nextPrime()#Cycles through prime list until we find a prime the number is divisible by\r\n return Prime_div(num/factor,Prime,divisors)", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0", "def divisors(decomp):\n combine = lambda acc, p: set(a * (p ** e) for a in acc for e in xrange(decomp[p] + 1))\n return reduce(combine, decomp, {1})", "def sum_of_proper_divisors(number: int):\n divisors = []\n\n for n in range(1, number):\n if number % n == 0:\n divisors.append(n)\n\n return sum(divisors)", "def findDivisors(n1, n2):\n divisors = () # the empty tuple\n for i in range(1, min(n1, n2) + 1):\n if n1%i == 0 and n2%i == 0:\n divisors = divisors + (i,)\n return divisors", "def find_prime_divisors(self, num):\n # If the number is prime, it is only divisible by itself.\n if pe_005.is_prime(num) or num < 2:\n return {num: 1}\n\n # If there were no primes searched for, then search for primes.\n if len(self._primes) <= 0:\n self.find_primes(num)\n\n results = dict()\n # Loop through the sorted primes list and stop when the prime is larger than the given number.\n for prime in self._primes[::-1]:\n if num <= 0:\n break\n\n # Count the number of divisions of the prime number into the current number.\n count, num = pe_005.count_divisions(num, prime)\n if count > 0:\n results[prime] = count\n\n return results", "def prime_factors(num):\n prime_factors = []\n for i in range(2, num + 1):\n if (num % i) == 0 and is_prime(i) == True:\n prime_factors.append(i)\n return prime_factors", "def prime_factors(num):\n result = []\n for i in range(2, num):\n if (is_prime(i)) and (num % i == 0):\n result.append(i)\n if not result:\n print(\"No prime factors\")\n else:\n return result", "def divisori(n):\n div=set()\n for i in range(1,int(n**0.5+1)):\n if n%i==0:\n div.add(int(n/i))\n div.add(i)\n return sorted(div)", "def list_of_divisibles(n):\n def is_prime(x, L = []):\n if x in L or x == 2:\n return True\n elif x == 1 or x % 2 == 0:\n return False\n for divisor in range(1, round(x ** .5)):\n if is_prime(divisor, L):\n if x % divisor == 0:\n return False\n return True\n \n def largest_exponent(i, n):\n \"\"\"\n Given a limit n and a base i, finds the largest exponenet x such that i ^ x <= n, and outputs i ^ x.\n\n \"\"\"\n x = 1\n while i ** x <= n:\n x += 1\n x -= 1\n print(i, x, i**x)\n return i ** x\n \n L = []\n for i in range(2, n+1):\n if i in L:\n continue\n elif is_prime(i):\n L.append(largest_exponent(i, n))\n return L", "def divisors(factors):\n ps = sorted(set(factors))\n omega = len(ps)\n\n def rec_gen(n=0):\n if n == omega:\n yield 1\n else:\n pows = [1]\n for j in xrange(factors.count(ps[n])):\n pows += [pows[-1] * ps[n]]\n for q in rec_gen(n + 1):\n for p in pows:\n yield p * q\n\n for p in rec_gen():\n yield p", "def factors(num):\n\tif is_prime(num) == True:\n\t\tfactors = [1, num]\n\t\treturn factors\n\telse:\n\t\tfactors = [1]\n\t\tsquare_root = int(math.ceil(math.sqrt(num)))\n\t\t\n\t\tfor n in range(2, square_root+1):\n\t\t\tif num % n == 0:\n\t\t\t\tfactors.append(n)\n\n\t\tfor n in range(1, len(factors)):\n\t\t\tnew_n = num / factors[n]\n\t\t\tif new_n not in factors:\n\t\t\t\tfactors.append(num / factors[n])\n\n\t\tfactors.append(num)\n\t\treturn factors", "def exercise_b2_24():\r\n number = input(\"Insert the number: \")\r\n flag = 0\r\n count = 0\r\n divisors_list =[]\r\n while flag <= int(number):\r\n flag +=1\r\n if (int(number) % flag) == 0:\r\n count += 1\r\n divisors_list.append(flag)\r\n print(\"\"\"\\nThe amount of divisors are: %s\"\"\"\r\n \"\"\"\\nThe numbers are: %s\\n\"\"\" % (count, divisors_list))\r\n return", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def get_divisors_sum(number):\n if number == 0:\n return 0\n\n divisors_list = []\n for i in range(number+1):\n j = i + 1\n if number % j == 0:\n divisors_list.append(j)\n\n return sum(divisors_list)", "def factorize(num):\n factors = []\n while num not in primes_list:\n for prime in primes_list:\n if num % prime == 0:\n factors.append(prime)\n num /= prime\n break\n factors.append(num)\n factors = sorted(factors)\n return factors", "def division(self, num, denom):\n inverse = self.extended_euclides(denom, self.prime)[0] # Apply our extended euclidean algo and obtain the first item of the list, this sould be the inverse\n return num * inverse # Multiply the inverse for the numerator to obtain quotient", "def getNumDivisors(n):\n\n n = abs(int(n))\n\n r = 1\n i = 2\n while i <= n:\n a = 0\n while n % i == 0:\n n = n / i\n a = a + 1\n r = r * (a + 1)\n i = i + 1\n\n return r", "def get_unique_factors(num):\n a = num\n m = int(num ** 0.5) if num > 100 else num\n factors = []\n primes = sieve(m)\n # Divide the number by compatible primes until it is 1\n # (or we run out of primes...)\n for p in primes:\n if a % p == 0:\n a = a / p\n factors.append(p)\n if a == 1:\n break\n return factors", "def num_divisors(n):\n\tif n < 2:\n\t\treturn 1 \t# not really correct\n\t\n\tdivisors = 1\n\ti = 2\n\n\twhile n > 1:\n\t\tp = 0 \t# p will be the maximum x such that i^x evenly divides n\n\n\t\t# repeatedly divide n by i, and store the number of times into p\n\t\twhile (n % i == 0):\n\t\t\tn = n / i\n\t\t\tp += 1\n\n\t\tdivisors = divisors * (p + 1)\n\t\ti += 1\n\n\treturn divisors", "def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors", "def get_prime_factors(number):\n if number == 1:\n return []\n\n # We have to begin with 2 instead of 1 or 0\n # to avoid the calls infinite or the division by 0\n for i in range(2, number):\n # Get remainder and quotient\n rd, qt = divmod(number, i)\n if not qt: # if equal to zero\n return [i] + get_prime_factors(rd)\n\n return [number]", "def divisor_counter(num):\n if num <= 0:\n raise ValueError('num must be a positive, non-zero number')\n\n divisors = 0\n num_sq_rt = num ** .5\n for possible_divisor in range(1, int(num_sq_rt)):\n if num % possible_divisor == 0:\n divisors += 1\n\n divisors *= 2\n # If num is a perfect square, we have to subtract one so we only count\n # the square root once. i.e. if num is 16, we only want to count 4 once\n if num_sq_rt.is_integer():\n divisors -= 1\n return divisors*2", "def primefactors(num):\n\n while num % 2 == 0:\n print(2)\n num = num / 2\n for i in range(3,int(math.sqrt(num))+1,2):\n while ( num % i == 0 ):\n print (i)\n num = num / i\n if num > 2:\n print (num)", "def selfDividingNumbers(left, right):\n ret = []\n bounds = list(range(left, right + 1))\n \n for num in bounds:\n div = True\n if '0' in str(num):\n pass\n elif num < 10:\n ret.append(num)\n else:\n for n in str(num): \n if num % int(n) !=0:\n div = False\n if div is True:\n ret.append(num) \n return ret", "def d(n):\n return sum(divisors(n))", "def get_prime_factors(num: int, prime_list: list = None) -> list:\n upper_bound = math.ceil(num / 2) + 1\n if not prime_list:\n prime_list = [prime for prime in primes.Primes(upper_bound)]\n\n prime_factors = []\n for prime in prime_list:\n temp = num\n multiplicity = 0\n temp, remainder = divmod(temp, prime)\n while remainder == 0 and temp >= 1:\n multiplicity += 1\n temp, remainder = divmod(temp, prime)\n if multiplicity > 0:\n prime_factors.append((prime, multiplicity))\n if prime > upper_bound:\n break\n\n if not prime_factors:\n prime_factors = [(num, 1)]\n\n return prime_factors", "def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)", "def get_count_of_divisors_by_number(self, number):\n if int(number) < 1:\n print \"this method needs number >= 1\"\n return 0\n if int(number) == 1:\n return 1\n # n = (a ** p) * (b ** q) * (c ** r) のとき、\n # n の約数は (p + 1) * (q + 1) * (r + 1) で求められる\n factors = self.get_prime_factors_by_number(number)\n patterns = factors.values()\n patterns_considered_power_of_zero = map(lambda x: x + 1, patterns)\n ret = reduce(lambda x, y: x * y, patterns_considered_power_of_zero)\n return ret", "def factors(number):\n\n if not (isinstance(number, int)):\n raise TypeError(\n \"Incorrect number type provided. Only integers are accepted.\")\n\n factors = []\n for i in range(1, number + 1):\n if number % i == 0:\n factors.append(i)\n return factors", "def factor(number):\n\tdividing_primes = sieve(number/2 + 1)\n\tfactors = []\n\t\n\twhile number != 1:\t\n\t\tif not dividing_primes:\n\t\t\treturn [number]\n\n\t\tnext_divisor = min(dividing_primes)\n\n\t\tif not number % next_divisor:\n\t\t\tfactors.append(next_divisor)\n\t\t\tnumber /= next_divisor\n\t\telse:\n\t\t\tdividing_primes.remove(next_divisor)\n\n\treturn factors", "def properdivisors(n):\n propdiv = [1]\n start, step = [2, 1]\n\n # Odd numbers only have odd divisors\n if n % 2 == 1:\n start, step = [3, 2]\n\n for i in range(start, ceil(sqrt(n)), step):\n if n % i == 0:\n propdiv.extend([i, n//i])\n\n # If n is a perfect square, also add the square root.\n # Note: this does not work for VERY LARGE n.\n if sqrt(n).is_integer() and n != 1:\n propdiv.append(int(sqrt(n)))\n\n return(propdiv)", "def get_sum_of_proper_divisors(num: int, prime_factors: list = None) -> int:\n if not prime_factors:\n prime_factors = get_prime_factors(num)\n\n sum_proper_divisors = 1\n for prime_factor, multiplicity in prime_factors:\n temp_sum = 0\n for i in range(multiplicity + 1):\n temp_sum += prime_factor ** i\n sum_proper_divisors *= temp_sum\n\n return sum_proper_divisors - num", "def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div", "def divisible_by(array, divisor):\n return_list = list()\n for i in array:\n if i % divisor == 0:\n return_list.append(i)\n return return_list", "def pair_divisors(n):\n primes_that_factor_into_n = []\n max_power_of_prime = 1\n\n yield (1, n)\n for i in xrange(2, int(sqrt(n))+1):\n if n % i == 0:\n yield (i, n/i)", "def getallprimefactors(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n print(n)\n n /= d\n d += 1\n return factors", "def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div", "def get_decompositions(num: int, parts: int) -> Iterator[List[int]]:\n num_factors = PrimeFactors.decompose(num)\n return _get_decompositions(num_factors, parts)", "def div(a, x):\n return [a[i]/x for i in range(2)]", "def listDivide(numbers, divide=2):\n newList = []\n for i in numbers:\n if i % divide == 0:\n newList.append(i)\n return len(newList)", "def prime_factors(number):\n factors = []\n\n if number == 0 : return factors\n\n # first round factors by two\n while number % 2 == 0:\n factors.append(2)\n number /= 2\n\n # other rounds goes by odd numbers only (no other even is prime)\n divisor = 3\n while divisor <= number:\n while number % divisor == 0:\n factors.append(divisor)\n number /= divisor\n divisor += 2\n\n return factors", "def factors(n):\n _factors = []\n p = 1\n\n # Loop until half of n\n while p <= n // 2:\n p += 1\n if div_by(p, _factors):\n continue\n if not n % p:\n _factors.append(p)\n\n # Number given is a prime\n if not _factors:\n _factors.append(n)\n\n return _factors", "def _divisor_gen(n):\n # Refactoring of \"What is the best way to get all the divisors of a number\"\n # at http://stackoverflow.com/a/171784.\n if n == 1:\n yield 1\n return\n primes, multiplicities = split(prime_factorization(n))\n # Since factors are prime, each partition of powers is a different divisor.\n for exponents in productrange(*[m+1 for m in multiplicities]):\n yield prod(p**e for p, e in zip(primes, exponents))", "def find_multiple(self, num):\n result = dict()\n for n in range(1, num+1):\n temp = self.find_prime_divisors(n)\n result.update({k:v for k,v in temp.items() if k not in result or result[k] < v})\n return reduce(operator.mul, (pow(k, v) for k,v in result.items()))", "def primeFactors(number):\n factorlist=[]\n loop=2\n while loop<=number:\n if number%loop==0:\n number/=loop\n factorlist.append(loop)\n else: \n loop+=1\n return factorlist", "def sum_divisors(n):\r\n return sum(proper_divisors(n)) + n", "def find_factors(number):\n \n i = 2\n prod = 1\n factors = []\n sqrt = math.sqrt(number)\n num = number\n \n while i < num:\n div = check_divisbility(number, i)\n if div == 'divisible':\n factors.append(i)\n number /= i\n prod *= i\n recurse = find_factors(number)\n \n #I recurse here because it prevents us wasting time playing with large numbers\n for fac in recurse:\n factors.append(fac)\n number /= fac\n prod *= fac\n #stop if we find a factor greater tha sqrt(number)\n if i >= sqrt:\n break\n #make sure we're not looking once we find all the factors \n if prod == num:\n break\n else:\n if i> sqrt:\n if len(factors)==0:\n factors.append(num)\n prod *= num\n else: \n print i\n recurse = find_factors(number)\n for fac in recurse:\n factors.append(fac)\n prod *= fac\n if prod == num:\n break\n i = i+1\n if prod != num:\n raise ValueError (\"This isn't right\")\n return factors", "def count_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def get_prime_factors(self, number):\n for prime in self.get_primes():\n while number % prime == 0:\n yield prime\n number /= prime\n \n if number == 1:\n break", "def generate_prime_factors(number):\n if not isinstance(number, int):\n raise ValueError\n list_of_ints = []\n if number > 1:\n remainder = number\n divisor = 2\n while remainder != 1:\n if remainder % divisor == 0:\n list_of_ints.append(divisor)\n remainder = remainder / divisor\n else:\n divisor += 1\n return list_of_ints", "def halvesies(numbers):\n halves = []\n for number in numbers:\n halves.append(number / 2)\n return halves", "def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))", "def divisors_sum(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(1, upper + 1):\n for j in range(i, upper + 1, i):\n nums[j] += i\n return nums" ]
[ "0.87616765", "0.84940773", "0.8239428", "0.8224629", "0.81623006", "0.80934465", "0.8033521", "0.78625435", "0.78059375", "0.7753855", "0.7717071", "0.76835775", "0.767592", "0.7558653", "0.75212115", "0.74451685", "0.74397975", "0.7423215", "0.74036175", "0.7400764", "0.739594", "0.7364428", "0.73128545", "0.72808325", "0.7275193", "0.7252872", "0.7238882", "0.7215149", "0.71972674", "0.71896285", "0.7188845", "0.7156517", "0.71500003", "0.7139932", "0.7130494", "0.6993159", "0.6935043", "0.69118917", "0.6903945", "0.6810671", "0.6808609", "0.6763997", "0.67535377", "0.6736255", "0.6733227", "0.6711459", "0.6710741", "0.66715795", "0.66548705", "0.6651913", "0.6599429", "0.65751266", "0.65523875", "0.6531988", "0.65287477", "0.6486971", "0.6438566", "0.64296484", "0.6403632", "0.6403632", "0.639065", "0.6340056", "0.63031113", "0.6291424", "0.6250679", "0.6236351", "0.6232696", "0.6196316", "0.61375475", "0.6119234", "0.6106158", "0.6097803", "0.6096354", "0.6086833", "0.60729504", "0.6060726", "0.6050045", "0.6045487", "0.6039719", "0.603209", "0.6014174", "0.5982857", "0.5950519", "0.5913077", "0.590388", "0.58659905", "0.58300227", "0.58243275", "0.5823871", "0.58186746", "0.58095556", "0.5807124", "0.58052826", "0.5792348", "0.57911205", "0.57786995", "0.5778061", "0.5773693", "0.57521576", "0.5749637" ]
0.8431378
2
Get absolute path to resource, to get logo
def resource_path(relative_path): try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS except Exception: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logo_url(self):\n return self.get_url(\"logo\", \"images/logo.png\")", "def logo_uri(self) -> str:\n return pulumi.get(self, \"logo_uri\")", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def get_client_company_logo_dir(self, filename):\n return f\"clients/company/{self.id}/{filename}\"", "def app_logo_url():\n return \"https://raw.githubusercontent.com/aiidalab/aiidalab-hello-world/master/img/logo.png\"", "def logo_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"logo_uri\")", "def logo():\n return os.getenv(\"LOGO\", \"static/logo.png\")", "def logo_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"logo_url\")", "def logo_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"logo_url\")", "def get_png_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES, \"img\", \"png\"\n )", "def getImagePath():\n currentPath = os.path.dirname(__file__)\n resourcesPath = os.path.join(currentPath, \"Resources\")\n imagesPath = os.path.join(resourcesPath, \"Images\")\n return imagesPath", "def get_resource_base_path(self): # real signature unknown; restored from __doc__\n return \"\"", "def iconPath(icon):\n return resourcePath(icon, dirname=\"icons\")", "def logo_image(self):\n return self.company_logo or \"upload/default_avatar.gif\"", "def logo_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_uri\")", "def logo_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_uri\")", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def _getFullPath(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self)._getFullPath(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "def logo(self) -> str:\n return self._logo", "def _get_image_absolute_path(image_scraper_model: ImageScraperModel) -> str:\n return image_scraper_model.image.path", "def build_image_path(self, src):\r\n o = urlparse(src)\r\n # we have a full url\r\n if o.hostname:\r\n return o.geturl()\r\n # we have a relative url\r\n return urljoin(self.target_url, src)", "def image_url(self):\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field:\n field = context.getField(IMAGE_FIELD_NAME)\n\n if field and field.get_size(context) > 0:\n return u'%s/%s_%s' % (obj_url, field.getName(), 'thumb')\n\n return u\"%s/isaw_logo.png\" % self.portal.absolute_url()", "def logo(self):\n from app import textify\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.logo)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['logo_full']\n url = self.app.url_for(\"asset\", asset_id = v._id, _full = True)\n alt = 'Logo '+self.barcamp.name# + \" - \" + textify(self.barcamp.seo_description)\n alt = alt.replace('\"', '&quot;')\n alt = alt.replace(\"'\", '&quot;')\n return \"\"\"<a title=\"%s\" href=\"%s\"><img alt=\"%s\" class=\"img-responsive\" src=\"%s\" width=\"%s\" height=\"%s\"></a>\"\"\" %(\n self.barcamp.name,\n self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n alt,\n url,\n v.metadata['width'],\n v.metadata['height'])", "def get_absolute_resource_path(resource_path):\n return pkg_resources.resource_filename(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )", "def logo(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"logo\")", "def logo(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"logo\")", "def logo_url(self):\n asset = self._get_image(self.barcamp.logo)\n if asset is None:\n return None\n uf = self.app.url_for\n return dict(\n [(vid, uf('asset', asset_id = asset._id)) for vid, asset in asset.variants.items()]\n )", "def get_icon():\n icon = Path(__file__).parent.joinpath(\"resources\", \"icon.png\")\n # We just want the string to the path for PySide.\n return str(icon)", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def og_logo(self):\n # first try fb logo\n uf = self.app.url_for\n img = self._get_image(self.barcamp.fb_image)\n if img is None:\n img = self._get_image(self.barcamp.logo)\n if img is None:\n return \"\" # no url\n\n v = img.variants.get('facebook', None) # fb size\n if v is None:\n return \"\"\n return self.app.url_for(\"asset\", asset_id = v._id, _full=True)", "def path(self) -> str:\n return self.src + \"/\"", "def get_image_url():", "def get_resources_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES\n )", "def logo_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_url\")", "def logo_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_url\")", "def get_resource(filename: str, path: str | None = None) -> str:\n root = Path(__file__).parent\n full_path = root if path is None else root / Path(path)\n return str(full_path / filename)", "def resource_path(relative_path):\n return os.path.join(BASEPATH, relative_path)", "def logo_image(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"logo_image\")", "def resourcePath(relative):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'assets'))\r\n\r\n return os.path.join(base_path, relative)", "def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))", "def resource_path(self, resource):\n return str(self.path.joinpath(resource))", "def resource_path(self, resource):\n return str(self.path.joinpath(resource))", "def logo(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo\")", "def logo(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo\")", "def logo(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo\")", "def logo(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo\")", "def get_image(self, img):\n if img == \"rss\":\n img = \"feed-icon-16x16.png\"\n loc = os.path.abspath(os.path.dirname(__file__))\n img = os.path.join(loc, img)\n if not os.path.exists(img):\n raise FileNotFoundError( # pragma: no cover\n f\"Unable to find {img!r}.\")\n return img\n else:\n raise FileNotFoundError( # pragma: no cover\n f\"Unable to get image name: {img!r}.\")", "def _path(name: str):\n return os.path.join(ASSET_PATH, name)", "def app_logo_img():\n return base64.b64decode(\n b\"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEX/TQBcNTh/AAAAAXRSTlPM0jRW/QAAAApJREFUeJxjYgAAAAYAAzY3fKgAAAAASUVORK5CYII=\"\n )", "def build_path(self, section: str, scr_name: str) -> str:\n base = self.root.joinpath(section).joinpath(f\"{scr_name}.png\")\n os_ver = env.version[1] if env.version < (11, 0) else env.version[0]\n os_specific = self.root.joinpath(section).joinpath(f\"{scr_name}_{os_ver}.png\")\n return (os_specific if os_specific.exists() else base).as_posix()", "def logo_small_url(self):\n return self.get_url(\"logo_small\", \"images/logo-small.png\")", "def resource_path(self, resource):\n # type: (Text) -> Text\n # This deliberately raises FileNotFoundError instead of\n # NotImplementedError so that if this method is accidentally called,\n # it'll still do the right thing.\n raise FileNotFoundError", "def getImagePath(self)->str:\n\n returnStr = '../../../../assets/image/{}.png'.format(randint(1,15))\n return returnStr", "def get_resource(res_name, res_type=\"icons\"):\n own_path = os.path.dirname(__file__)\n resource_path = os.path.abspath(os.path.join(own_path, os.pardir, \"resources\", res_type))\n return os.path.join(resource_path, res_name)", "def get_full_path(_path: str):\n if _path is None:\n return None\n\n with open(r\"bot\\data\\image_config.json\") as f:\n try:\n image_config = json.load(f)\n\n except json.decoder.JSONDecodeError as e:\n print(e)\n return None\n\n base_url = image_config.get(\"base_url\")\n poster_size = image_config.get(\"poster_sizes\")[-2]\n return f\"{base_url}{poster_size}{_path}\"", "def or_meta_image_url(context):\n try:\n request = context['request']\n absolute_url = request.build_absolute_uri(OR_META_IMAGE_URL)\n except KeyError:\n absolute_url = BASE_URL + OR_META_IMAGE_URL\n return absolute_url", "def resourcePath(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)", "def logo_image(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_image\")", "def logo_image(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_image\")", "def resource(request):\n local_path = os.path.dirname(request.module.__file__)\n return lambda *args: get_resource_path(args, local_path)", "def _target_sprite_sheet_path(self) -> Path:", "def create_icon_url(cls, name):\n return os.path.join(RESOURCE_FOLDER, name)", "def resource_path(self, relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n # base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n # return os.path.join(base_path, relative_path)\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_path)", "def get_image_path_instrument1(instance, filename):\n return os.path.join('uploads', 'instrument', str(instance.siteId.pk))", "def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None", "def path_for(filename):\n if settings.value(Key.Theme) == Themes.Light.value:\n return (IMAGES_PATH / Themes.Light.value / filename).as_posix()\n return (IMAGES_PATH / Themes.Dark.value / filename).as_posix()", "def resourcePath(self,relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n output = base_path + relative_path\n return output", "def logo_upload_to(instance, filename):\n _, extension = splitext(filename)\n filename = f\"programs/{instance.slug}_logo{extension}\"\n return filename", "def get_image_path(self):\n\t\treturn call_sdk_function('PrlVmDev_GetImagePath', self.handle)", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def resourcePath(relative, dirname=\"data\"):\n # first look in pyinstaller bundle\n if hasattr(sys, \"_MEIPASS\"):\n path = os.path.join(sys._MEIPASS, dirname)\n \n else:\n # then look in py2app bundle\n path = os.environ.get(\"RESOURCEPATH\", None)\n if path is None:\n # then look in source code directory\n path = os.path.join(RESOURCE_BASE, dirname)\n \n path = os.path.join(path, relative)\n \n return path", "def imagePath(image):\n return os.path.join(\":/images\", image)", "def bundle_path(self, app):\n return (\n self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )", "def get_style_img_path(style):\n return f\"images/style/{style}.jpg\"", "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "def get_pathname(self):\n return self.image_data.path", "def imagePath(self):\n return self.path", "def target_resource_path(self) -> Optional[str]:\n return pulumi.get(self, \"target_resource_path\")", "def steam_api_url_logo(app_id, app_logo_url):\n return configuration.steam_logo_url + \"/\" + str(app_id) + \"/\" + app_logo_url + \".jpg\"", "def resource_path(relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"", "def _path(self):\n path = REQUIRES['static_url']\n\n # add paths as specified\n for prefix, subpath in self.getPrefixDict().items():\n if ( self.filename.startswith(prefix) ):\n path += subpath\n break;\n\n return path", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def getIconPath(self): #$NON-NLS-1$\r\n icon = self.getIcon()\r\n if not icon:\r\n return None\r\n return self.extensionPoint.getPlugin().getResourceRegistry().getImagePath(icon)", "def get_logo(img_or_path):\n if not isinstance(img_or_path, str):\n return np.asarray(img_or_path)\n path = img_or_path\n if path.startswith('gs://') or path.startswith('gcs://'):\n _, path = path.split('//', 1)\n local_path = logo_dir / os.path.basename(path)\n if not local_path.exists():\n fs = gcsfs.GCSFileSystem()\n local_path.parent.mkdir(parents=True, exist_ok=True)\n fs.get_file(path, local_path)\n else:\n local_path = Path(path)\n if not local_path.is_absolute():\n local_path = logo_dir / path\n return skio.imread(local_path)", "def resource_path(relative_path):\r\n try:\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path= getattr(sys,'MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def get_img_path(self, inp=None, aux=None):\n img_path = \"\".join([\n self._report_generator.config.img_dir,\n os.sep,\n self.get_tag(inp, aux),\n \".png\"])\n return img_path", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def get_resource(self):\n from rowgenerators import parse_app_url # Here, to break an import cycle\n\n self._resource = self._downloader.download(self.inner)\n\n\n ru = parse_app_url(self._resource.sys_path,\n downloader=self.downloader,\n scheme_extension=self.scheme_extension,\n **self.frag_dict)\n\n\n return ru", "def get_asset_path(name):\n return os.path.join(constants.ROOT_DIR, 'assets', name)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n path = os.path.join(base_path, relative_path)\n return path", "def to_file_path(self, resourcePath: str) -> PurePath:\n rel = resourcePath.replace('res://', '')\n return self._root.joinpath(rel)", "def get_default_data_image_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, \"um_000000.png\"))", "def ResourcePath(self, name):\n pass", "def _getImagePath(self, link):\n return settings.WIKI_UPLOAD_URL + urlquote(link)" ]
[ "0.72962266", "0.719628", "0.715207", "0.7071001", "0.6983272", "0.69696295", "0.6932713", "0.6828541", "0.6828541", "0.6821016", "0.68035823", "0.6728892", "0.67225164", "0.67204624", "0.6692771", "0.6692771", "0.6684799", "0.6639405", "0.6596103", "0.65899694", "0.6586349", "0.65708053", "0.656119", "0.6557243", "0.6513746", "0.6513746", "0.6512983", "0.6504911", "0.6453024", "0.64487016", "0.6441987", "0.643983", "0.6415038", "0.6396769", "0.6396769", "0.63797885", "0.63764596", "0.6372777", "0.63599306", "0.63527817", "0.6336206", "0.6336206", "0.63221705", "0.63221705", "0.63221705", "0.63221705", "0.6320502", "0.6317051", "0.63089424", "0.6305118", "0.62705475", "0.62671727", "0.6247992", "0.62468165", "0.6204965", "0.6197523", "0.6178249", "0.6176438", "0.6176438", "0.6176313", "0.6168858", "0.6158824", "0.6141798", "0.6141396", "0.6128697", "0.61255634", "0.6120289", "0.61192113", "0.6083682", "0.6083242", "0.60751224", "0.6060397", "0.60585296", "0.605608", "0.60247976", "0.6022582", "0.60181826", "0.6008583", "0.6007027", "0.600624", "0.6003352", "0.5993936", "0.5991964", "0.5985476", "0.5985476", "0.5985476", "0.5985476", "0.5985476", "0.59853345", "0.59771824", "0.59758765", "0.5965547", "0.5963983", "0.5963781", "0.5958336", "0.5951344", "0.5950794", "0.59410244", "0.5939669", "0.5938539", "0.593399" ]
0.0
-1
function looping through other functions to build tree and then save it
def loop_trough_row(var, name_col, list_error, list_of_project_info): sheet = get_excel(exceldokument) i = 3 while i < sheet.nrows: try: file_name = str(sheet.cell_value(i, name_col)) if file_name != "": tree, safecookie, steps, prev = createxmlmall() list_error = loop_through_col(steps, safecookie, i, file_name, var, list_error, list_of_project_info) # save_xml(tree, (file_name) + ".xml", folder_name) for errors in list_error: if errors.error_type == "4": return list_error save_xml(tree, file_name + ".tcs", folder_name) else: for l in range(sheet.ncols): if str(sheet.cell_value(i, l)) != "": p = AddFileWithError(i + 1, "3") list_error = p.add_el(list_error, p) i += 1 except: p = AddFileWithError(i + 1, "1") list_error = p.add_el(list_error, p) return list_error return list_error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_tree(self, prefix, depth):\n for count, function in [[self.n_files, self.make_file],\n [self.n_children, self.make_child_recurse],\n [self.n_symlinks, self.make_symlink]]:\n for i in range(count):\n if not self.can_continue():\n return\n name = os.path.join(prefix, self.name_gen.next())\n function(name, depth)", "def save_filter_tree(self,root,good_nodes):\n date = str(datetime.now())[5:10]\n filename = \"./log/filter_tree_\" + date\n if os.path.isfile(filename):\n os.remove(filename)\n if good_nodes == []:\n with open(filename,\"a+\") as inp:\n inp.write(\"root\\n\")\n else:\n for node in good_nodes:\n node.save_node(filename)\n with open(filename,\"a+\") as inp:\n inp.write(\"root\\n\")\n \n parent = root\n curr_node = root.children[0]\n while True:\n curr_node.save_node(filename)\n if curr_node.children == []:\n while parent.next_child(curr_node) is None:\n if parent == root and parent.next_child(curr_node) is None:\n return\n curr_node = parent\n parent = curr_node.parent\n curr_node = parent.next_child(curr_node)\n else:\n parent = curr_node\n curr_node = parent.children[0]", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)", "def create_rooted_trees_from_dir(paths, fout, outgroup):\n #pdb.set_trace()\n fout = open(fout, 'w')\n for count, path in enumerate(paths):\n base_path, tree_file_name = os.path.split(path)\n #pdb.set_trace()\n fin = open(path)\n for tree in fin:\n tree = tree.strip()\n tree = Tree(tree)\n tree.set_outgroup(outgroup)\n newick = tree.write(format=5) + '\\n'\n fout.write(newick)\n print count+1\n fout.close()", "def tree_construct(self, *args, **kwargs):\n l_files = []\n d_constructCallback = {}\n fn_constructCallback = None\n d_probe = {}\n l_range = []\n\n for k, v in kwargs.items():\n if k == 'l_files': l_files = v\n if k == 'constructCallback': fn_constructCallback = v\n if k == 'd_probe': d_probe = v\n\n if d_probe: l_files = d_probe['l_files']\n index = 0\n total = len(l_files)\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(l_files, desc = ' Constructing tree')\n else:\n l_range = l_files\n for l_series in l_range:\n if len(l_series):\n str_path = os.path.dirname(l_series[0])\n l_series = [ os.path.basename(i) for i in l_series]\n # self.simpleProgress_show(index, total)\n self.d_inputTree[str_path] = l_series\n if fn_constructCallback:\n kwargs['path'] = str_path\n d_constructCallback = fn_constructCallback(l_series, **kwargs)\n self.d_inputTreeCallback[str_path] = d_constructCallback\n self.d_outputTree[str_path] = \"\"\n index += 1\n return {\n 'status': True,\n 'd_constructCallback': d_constructCallback,\n 'totalNumberOfAllSeries': index,\n 'd_probe': d_probe\n }", "def save_node(self):\n # save node in path2node\n if self.full_path in self.file.path2node:\n print \"** Error, created node with path twice:\\n%s\" % self.full_path\n traceback.print_stack()\n sys.exit(1)\n self.file.path2node[self.full_path] = self \n # save node in id_lookups\n id = self.sdef['id']\n ns = self.sdef['ns']\n type = self.sdef['type']\n custom = 'custom' in self.sdef and self.sdef['custom']\n if self.parent is None and self.sdef['df'] and not custom:\n # structure (not custom) created at top level, save in id_lookups\n if id not in self.file.id_lookups[ns]:\n print \"** Error: Unable to find id '%s' in id_lookups when saving node\" % id\n traceback.print_stack()\n sys.exit(1)\n if self.path not in self.file.id_lookups[ns][id]:\n print (\"** Error: Unable to find path '%s' in id_lookups when\"\n \" saving node %s\") % (self.path, id)\n print \"self.sdef['df'] is:\"\n pp.pprint (self.sdef['df'])\n traceback.print_stack()\n sys.exit(1)\n self.file.id_lookups[ns][id][self.path]['created'].append(self)\n # save node in all_nodes, either at top level (if no parent) or inside\n # mstats structure of parent node\n if self.parent is None:\n if self.path in self.file.all_nodes:\n self.file.all_nodes[self.path].append(self)\n else:\n self.file.all_nodes[self.path] = [self, ]\n else:\n if id not in self.parent.mstats:\n if custom:\n # custom node created, add id to mstats of parent\n self.parent.mstats[id] = { 'df': {}, 'type':type, 'ns': ns,\n 'created': [ self, ], 'qty':'?' }\n else:\n print \"** Error: Unable to find key '%s' in parent mstats\" % id\n print \"self.parent.mstats is\"\n pp.pprint (self.parent.mstats)\n traceback.print_stack()\n sys.exit(1)\n else: \n # append node to parent created mstats \n self.parent.mstats[id]['created'].append(self)", "def convert_treebank(input_dir, output_dir, strategy, subtask):\n\n for f in input_dir.iterdir():\n with open(f, \"r\") as json_file:\n docs = json.load(json_file)\n trees = \"\"\n for doc in docs[\"docs\"]:\n for sent in doc[\"sents\"]:\n graph = sent[\"graph\"]\n if strategy == \"start\":\n tree = traverse_graph_start(graph)\n elif strategy == \"start-without-pos\":\n tree = traverse_graph_start_without_pos(graph)\n elif strategy == \"end\":\n tree = traverse_graph_end(graph)\n elif strategy == \"end-extra-node\":\n tree = traverse_graph_end_extra_node(graph)\n elif strategy == \"start-end-extra-node\":\n tree = traverse_graph_start_end_extra_node(graph)\n elif strategy == \"start-end-extra-node-heuristic\":\n tree = traverse_graph_start_end_extra_node_heuristic(graph) \n if subtask:\n tree = subtask_prune(tree)\n tree_string = get_string(tree)\n trees += tree_string + \"\\n\"\n with open(output_dir.joinpath(f.name).with_suffix(\".txt\"), \"w+\") as tree_files:\n tree_files.write(trees)", "def build(root):", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def create_tree(outFile, tree, path='/'):\n for key, foo in tree.list():\n if outFile.has_node(path, key):\n logging.debug('Path already found:', path, key)\n continue\n logging.debug('Creating group:', path, key)\n outFile.create_group(path, key, key)\n dest = path + key + '/'\n if outFile.has_node(dest):\n continue\n create_tree(outFile, tree.child(key), dest)", "def process_tree(tree):\n c = circuit()\n l = line()\n names = {}\n procedures = []\n for lst in tree.children:\n print(lst)\n if type(lst[0]) is str:\n names[lst[0]] = lst[1]\n else:\n procedures.append(lst)\n print(names)\n #print(procedures)\n\n for proc in procedures:\n\n proc_elements_names = proc[0]\n proc_name = proc[1]\n\n #print(proc_elements_names)\n #print(proc_name)\n\n if proc_name == \"set_mode\":\n mode_name = proc_elements_names[0]\n if mode_name != \"draw-mode\": \n c.set_mode(mode_name)\n elif mode_name == \"draw-mode\":\n l1 = line()\n # draw mode is different from other modes\n for element in names:\n e = CompleteElement(element)\n e.set_other_attrs(names[element])\n e.process_other_attrs()\n l1.addElement(e)\n c.connectInSeries(l1)\n c.set_mode(\"draw-mode\")\n \n \n if proc_name == \"series\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n l = l1\n c.connectInSeries(l)\n #raise SyntaxError(\"Alias {0} referrenced before assignment\".format(item[0]))\n\n elif proc_name == \"parallel\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n c.connectInParallel(l1)\n l1 = line()\n\n\n elif proc_name == \"add_parallel\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n l1 = line()\n l1.addElement(names[new_element])\n c.connection.append(l1)\n\n\n elif proc_name == \"add_series\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n for ln in c.connection:\n for e in ln.elements:\n if names[old_element] == e:\n ln.addElement(names[new_element])\n\n\n c.evaluate(\"output.png\")\n #print(c)", "def export_call_trees_to_editor():\n\n def dump_tree_model(model, indent):\n values = []\n if model:\n for row in model:\n first = row[0]\n if first == 'computing...':\n return []\n if not row[1] or \\\n first.endswith(' called by ') or \\\n first.endswith('calls '):\n values.append(indent + first)\n else:\n values.append(indent + first + '\\t\\t{}'.format(row[1]))\n\n values.extend(\n dump_tree_model(row.iterchildren(), indent + \" \"))\n return values\n\n m = pygps.get_widget_by_name(\"Call Graph Tree\").get_model()\n text = '\\n'.join(dump_tree_model(m, \"\"))\n\n # Open an editor and write the contents\n\n GPS.execute_action(\"new file\")\n buf = GPS.EditorBuffer.get()\n buf.delete() # in case some template was inserted\n buf.insert(buf.at(1, 1), text)", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n 
strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree", "def filetree(self) -> P:\n ...", "def writeGraph2File(self, file, genGraph=1, isRootNode=0, rootNodeName = \"rootNode\", \\\r\n indent=\" \", genConstraints = 0, fileName = '', genGGcode = 0, parentName=\"self\", \\\r\n genImports = 0, depth = 1, nodesToGenList = [] ):\r\n\r\n # generate code for the nodes...\r\n counter =0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\t\t\t\t\t\t# iterate on all the node types...\r\n for node in self.listNodes[nodetype]:\t\t\t\t\t# Iterate on all the nodes of each type\r\n node.genAttributesCode(file, genGraph, None, isRootNode, rootNodeName, indent, genConstraints, 1, genGGcode, parentName, genImports, depth + 1 )\r\n if self.isSubclass(node, 'ASG'):\t\t\t\t\t# if it is a subclass of ASG, ws should include the file generated (hierarchical modeling)\r\n newFile = fileName+str(counter)\r\n file.write(indent+'exec \"from '+newFile+' import '+newFile+'\\\\n\" in self.__dict__, self.__dict__\\n')\r\n file.write(indent+'self.'+newFile+'(self, self.obj'+str(node.objectNumber)+') \\n\\n')\r\n counter = counter + 1\r\n else:\r\n for node in nodesToGenList:\t\t\t\t\r\n node.genAttributesCode(file, genGraph, None, isRootNode, rootNodeName, indent, genConstraints, 1, genGGcode, parentName, genImports, depth + 1 )\r\n if self.isSubclass(node, 'ASG'):\t\t\t\t\t# if it is a subclass of ASG, ws should include the file generated (hierarchical modeling)\r\n newFile = fileName+str(counter)\r\n file.write(indent+'exec \"from '+newFile+' import '+newFile+'\\\\n\" in self.__dict__, 
self.__dict__\\n')\r\n file.write(indent+'self.'+newFile+'(self, self.obj'+str(node.objectNumber)+') \\n\\n')\r\n counter = counter + 1\r\n \r\n \r\n # if fileName has a value, we are saving a model, we must generate a function to hold the connections...\r\n if fileName != '':\r\n # if we are not dealing with a hierarchical model, an extra method is not needed..\r\n hierarchical = self.isHierarchical()\r\n if hierarchical:\r\n file.write('\\ndef '+fileName+'_connections(self, rootNode):\\n')\r\n\r\n\r\n #-------- Modified by Ximeng Sun / Apr 9,2005 for large conn nums --------\r\n file.write('\\n')\r\n writed = 0\r\n # generate code for the connections...\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\r\n for node in self.listNodes[nodetype]:\r\n if isRootNode: \r\n if(node.__dict__.has_key('name')):\r\n debugName = ' named ' + node.name.toString() + '\\n'\r\n else:\r\n debugName = ' of type ' + node.__class__.__name__ + '\\n'\r\n file.write(indent+'# Connections for obj'+str(node.objectNumber)\r\n +' (graphObject_: '+node.graphObject_.tag + ')' + debugName)\r\n file.write(indent+'self.drawConnections(\\n')\r\n res = node.genConnectionsCode(file, genGraph, isRootNode, \r\n indent, 1, writed)\r\n if isRootNode: \r\n file.write(' )\\n')\r\n else:\r\n for node in nodesToGenList:\r\n if isRootNode: file.write(indent+'self.drawConnections(')\r\n res = node.genConnectionsCode(file, genGraph, isRootNode, indent, 1, \r\n writed, nodesToGenList = nodesToGenList)\r\n if isRootNode: file.write(' )\\n')\r\n file.write('\\n')\r\n #------------ End of modification by Ximeng Sun / Apr 9,2005 -------------\r\n \r\n \r\n # if rootNode and I'm generating a function (filename != '')\r\n # then call subModel's functions for connections...\r\n if isRootNode and fileName != '': # if main model\r\n counter = 0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes: # iterate, to search for all submodels\r\n for node in self.listNodes[nodetype]:\r\n if self.isSubclass(node, 'ASG'): # found a submodel\r\n file.write(indent+'self.'+fileName+str(counter)+'_connections( self, self.obj'+str(node.objectNumber)+')\\n')\r\n writed = 1\r\n counter = counter + 1\r\n else:\r\n for node in nodesToGenList:\r\n if self.isSubclass(node, 'ASG'): # found a submodel\r\n file.write(indent+'self.'+fileName+str(counter)+'_connections( self, self.obj'+str(node.objectNumber)+')\\n')\r\n writed = 1\r\n counter = counter + 1\r\n \r\n \r\n if fileName != '' and (not writed) and hierarchical: # we must write 'pass', because nothing has been writed in the function!!\r\n file.write(indent+'pass\\n')", "def clean():\n new_tree = None", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 
1\n                            elif not child[\"is_file\"]:\n                                num_dirs += 1\n\n                        # note down name and numbers for each dir\n                        f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n                                str(num_dirs) + \" directories\\n\")\n\n                # goto subdir\n                if len(i[\"children\"]) > 0:\n                    walkthrough(i[\"children\"])", "def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder and piece\n\t\t\tself.addNode(self.tree,data.path(),data)", "def job_tree(self):\n\n        # 1. Enforce depth of 1 for steps\n        def depth_one(steps):\n            depth_one = []\n            for step in steps:\n                if type(step) is list:\n                    if type(step[0]) is list:\n                        depth_one.append(step[0])\n                    else:\n                        depth_one.append(step)\n                else:\n                    depth_one.append([step])\n            return depth_one\n\n        # 2. Convert steps to list of node objects (0,1,2,3...)\n        def assign_nodes(steps):\n            nodes = [i for i in range(len(steps))]\n            objects = list(\n                set([elem for sublist in steps for elem in sublist]))\n\n            # checks for multiple src and dst objects -- added when looking for\n            # multiples\n            split_objects = []\n            for obj in objects:\n                if len(obj) > 1:\n                    new_objs = obj.split(\", \")\n                    split_objects.extend(new_objs)\n                else:\n                    split_objects.append(obj)\n            objects = split_objects\n            del(split_objects)\n\n            # populate with leafless trees (Node objects, no edges)\n            for node in nodes:\n                nodes[node] = Node(str(node))\n\n            # search for leafy trees\n            for obj in objects:\n\n                # accounts for multiple src/dst objects\n                leaves = []\n                for i, sublist in enumerate(steps):\n                    for string in sublist:\n                        if string.count(',') > 0:\n                            if obj in string:\n                                leaves.append(i)\n                        else:\n                            if obj in sublist:\n                                leaves.append(i)\n                leaves = sorted(list(set(leaves)))\n\n                if len(leaves) > 1:\n                    viable_edges = []\n\n                    # compute cross-product\n                    for leaf1 in leaves:\n                        for leaf2 in leaves:\n                            if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n                                viable_edges.append(sorted((leaf1, leaf2)))\n\n                    # form edge networks\n                    for edge in viable_edges:\n                        n1, n2 = nodes[edge[0]], nodes[edge[1]]\n                        n1.add_edge(n2)\n                        n2.add_edge(n1)\n                        nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n            return nodes\n\n        # 3. Determine number of trees and regroup by connected nodes\n        def connected_nodes(nodes):\n            proto_trees = []\n            nodes = set(nodes)\n\n            while nodes:\n                n = nodes.pop()\n                group = {n}\n                queue = [n]\n                while queue:\n                    n = queue.pop(0)\n                    neighbors = n.edges\n                    neighbors.difference_update(group)\n                    nodes.difference_update(neighbors)\n                    group.update(neighbors)\n                    queue.extend(neighbors)\n                proto_trees.append(group)\n            return proto_trees\n\n        # 4. Convert nodes to nested dictionary of parent-children relations\n        # i.e. adding depth -- also deals with tree-node sorting and path\n        # optimization\n        def build_tree_dict(trees, steps):\n            # node sorting in trees\n            sorted_trees = []\n            for tree in trees:\n                sorted_trees.append(\n                    sorted(tree, key=lambda x: int(x.name)))\n\n            # retrieve values of the nodes (the protocol's containers)\n            # for each tree ... 
may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)", "def pseudo_tree(self, gids, out_tree):\r\n \r\n pseudo_tree = '('\r\n pseudo_tree += ','.join(gids)\r\n pseudo_tree += ');'\r\n \r\n fout = open(out_tree, 'w')\r\n fout.write(pseudo_tree)\r\n fout.close()", "def buildTree(self,newick):\n\t\tfor i in range(len(newick)):\n\t\t\tif newick[i] == \"(\":\n\t\t\t\tself.currNode.children.append(node.node(self.currNode))\n\t\t\t\tself.currNode=self.currNode.children[0]\n\t\t\t#polytomy support enabled\n\t\t\telif newick[i] == \",\":\n\t\t\t\tself.currNode=self.currNode.parent\n\t\t\t\tself.currNode.children.append(node.node(self.currNode))\n\t\t\t\tself.currNode=self.currNode.children[-1]\n\t\t\telif newick[i] == \")\":\n\t\t\t\tself.currNode=self.currNode.parent\n\t\t\telse:\n\t\t\t\tself.currNode.info+=newick[i]", "def _walk(self, level=0):\n l_dict = self.list_all()\n indent = level * \" \"\n for node in l_dict[\"nodes\"]:\n print(indent + \"node\", node)\n for group in l_dict[\"groups\"]:\n print(indent + \"group: \", group)\n with self.open(group) as hdf_group:\n hdf_group._walk(level=level + 1)", "def make_drs_tree(self):\n pass", "def rf_treeMode(self, selTree, treeDict):\n for node in treeDict['tree']['_order']:\n newItem = TreeNode(**treeDict['tree'][node])\n if len(node.split('/')) == 1:\n 
self.addTopLevelItem(newItem)\n else:\n parent = self._getItemFromTreePath('/'.join(node.split('/')[:-1]))\n parent.addChild(newItem)\n if getattr(newItem, 'nodeType') == 'shotNode':\n newItem._itemPath = node\n newItem._dataPath = os.path.join(self.pm._treePath, selTree)\n for fld in node.split('/'):\n newItem._dataPath = os.path.join(newItem._dataPath, fld)\n newItem._dataPath = pFile.conformPath(newItem._dataPath)\n newItem._dataFile = \"%s.py\" % newItem._dataPath\n for step in treeDict['steps']:\n newStep = TreeNode(nodeType='step', nodeLabel=step, nodeName=step)\n newStep._tree = selTree\n newStep._step = step\n newStep._dataPath = newItem._dataPath\n newStep._ltPath = pFile.conformPath(os.path.join(newStep._dataPath, 'lt', step))\n newStep._dataFile = newItem._dataFile\n newItem.addChild(newStep)", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def explore(racine,file_out):\n if racine.data == \"programme\":\n if(type(racine.children)==list):\n for el in racine.children:\n explore(el,file_out)\n elif racine.data == \"txt\":\n if(file_out != None):\n file_out.write(racine.children[0])\n \n elif racine.data == \"dumbo_bloc\":\n if(type(racine.children)==list):\n for el in racine.children:\n explore(el,file_out)\n\n elif racine.data == \"expression_list\":\n if(type(racine.children)==list):\n for el in racine.children:\n explore(el,file_out)\n\n elif racine.data == \"expression\":\n if(racine.children[0].data == \"print\"):\n if file_out is not None:\n file_out.write(str(getVar(racine.children[0].children[0])) )\n \n elif(racine.children[0].data == \"if\"):\n executeIf(racine.children[0],file_out)\n elif(racine.children[0].data == \"for\"):\n executeFor(racine.children[0],file_out)\n elif(racine.children[0].data == \"variable\"):\n dic[racine.children[0].children[0]] = getVar(racine.children[1])", "def test_buildTree(self):\r\n root = buildTree()\r\n assert root.getchildren()[0].getchildren()[0].attrib['id'] == 'c-test_crisis'\r\n assert root.getchildren()[1].getchildren()[0].attrib['id'] == 'o-test_org'\r\n assert root.getchildren()[2].getchildren()[0].attrib['id'] == 'p-algore'", "def old_create_dir_struct(self, create_first_rev_folder=\"True\"):\n # | - create_dir_struct\n for job in self.job_var_lst:\n if create_first_rev_folder == \"True\":\n path = self.var_lst_to_path(job) + \"_1\"\n elif create_first_rev_folder == \"False\":\n path = self.var_lst_to_path(job)\n\n path = self.root_dir + \"/\" + path\n\n if os.path.exists(path):\n mess = \"Path already exists: \" + str(path)\n print(mess)\n\n elif not os.path.exists(path):\n os.makedirs(path)\n\n # | - Creating Variable Text Files Through Directoy Structure\n for job in self.job_var_lst:\n path = self.var_lst_to_path(job)\n path = self.root_dir + \"/\" + path\n\n file_name = path + \"job_dir_level\"\n with open(file_name, \"w\") as fle:\n fle.write(\"\\n\")\n\n for root, dirs, files in os.walk(self.root_dir + \"/data/\"):\n if \"job_dir_level\" in files:\n continue\n\n else:\n prop_lst = []\n for folder in dirs:\n tmp = self.sep.join(folder.split(self.sep)[1:])\n\n prop = self.__replace_p_for_per__(tmp)\n prop = self.__replace_negative_for_n__(prop)\n prop_lst.append(prop)\n\n for key, value in self.level_entries.items():\n if set(prop_lst) == set(map(str, value)):\n\n file_name = root + \"/properties.txt\"\n with open(file_name, \"w\") as fle:\n fle.write(key + \"\\n\")\n\n # f = open(root + \"/properties.txt\", \"w\")\n # f.write(key + \"\\n\")\n # f.close()\n # __|\n\n # 
self.__create_dir_structure_file__()\n\n # | - folders_exist attribute should be True from now on\n file_name = self.root_dir + \"/jobs_bin/.folders_exist\"\n with open(file_name, \"w\") as fle:\n fle.write(\"\\n\")\n\n self.folders_exist = self.__folders_exist__(True)\n # __|\n\n # __|", "def _do_build ():\n if os.path.exists(\"./database\"):\n data_path = \"./database/\"\n elif os.path.exists(\"../database\"):\n data_path = \"../database/\"\n elif os.path.exists(\"../../database\"):\n data_path = \"../../database/\"\n else:\n data_path = \".\"\n\n dir_specs = {}\n databases = []\n\n # first pass over the databases to create complete tree:\n for dirpath, dirnames, filenames in os.walk(data_path):\n # all databases are stored\n for name in filenames:\n if name.endswith(\".db\"):\n databases.append(os.path.join(dirpath, name).replace(data_path, \"\"))\n # but we need to store specs here otherwise things could get a bit confusing\n elif name.endswith(\".spec\"):\n possible_dir = os.path.join(dirpath, name[:-5]+\".db\")\n if os.path.exists(possible_dir) and os.path.isdir(possible_dir):\n spec_name = possible_dir.replace(data_path, \"\")\n dir_specs[spec_name] = parse_spec(os.path.join(dirpath, name))\n\n # and we create DatabaseFolders for each subfolder\n for name in dirnames:\n if name.endswith(\".db\"):\n # dump the extension here too\n obj_name = name[:-3]\n this_folder = DatabaseFolder(obj_name)\n\n if dir_specs.has_key(name):\n this_folder.spec = dir_specs.pop(name)\n\n if dirpath != data_path:\n search = dirpath.replace(data_path, \"\").split(PATH_DELIM)\n try:\n top_folder = globals()[search[0]]\n except KeyError:\n raise DatabaseError, \"Subdirectory of a db folder without a DatabaseFolder?\"\n for p in search[1:]:\n if p == name:\n break\n try:\n top_folder = getattr(top_folder, p)\n except AttributeError:\n raise DatabaseError, \"Subdirectory of a db subfolder without a DatabaseFolder subfolder!\"\n top_folder.append(this_folder)\n else:\n globals()[obj_name] = this_folder\n\n for database in databases:\n build_from_file_name(database, data_path)", "def test_Tree():", "def create_all(graph,first_last_fn):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n #paths = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n #paths[trip_id] = p\n while p.next_line != len(graph.lines):#file_length:\n graph.trip_id2line_num[trip_id] = line_num\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n #trip_id = dg.normalize(lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n # paths[trip_id] = p\n graph.trip_id2line_num[trip_id] = line_num\n graph.num_trips = num_trips\n\n\n with open(first_last_fn,'wb') as output:\n pickle.dump(graph.first_last2trip_ids,output)\n\n with open('pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)\n #return paths", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)", "def save_tree(self, filename=None, fields=None):\n\n if filename is None:\n filename = f\"tree_{self.uid}\"\n\n return self.arbor.save_arbor(\n filename=filename, fields=fields,\n trees=[self])", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in 
files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def save_root(self, filename=None):\n if not filename:\n filename = tkFileDialog.asksaveasfilename()\n if filename:\n maketree.populate(self.initial.itervalues(),\n self.detectors.itervalues())\n maketree.write(filename)", "def post_order(self):\n try:\n if not self.root:\n return \"the tree is empty!\"\n else:\n output = []\n\n def order_tree(node):\n if node.left:\n order_tree(node.left)\n if node.right:\n order_tree(node.right)\n nonlocal output\n output += [node.value]\n return output\n final_out = order_tree(self.root)\n return final_out\n except:\n print(\"something went wrong please try again\")", "def _collapse_all(self):\n# global approved, conflicts, suggestions, unknown, cldr\n self.tree.item('approved', open=False, \\\n values=[self._count_children('approved'), ''])\n for child in self.tree.get_children('approved'):\n self.tree.item(child, tags='approved')\n\n self.tree.item('conflicts', open=False, \\\n values=[self._count_children('conflicts'), ''])\n for child in self.tree.get_children('conflicts'):\n self.tree.item(child, tags='conflicts')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='conflicts',)\n \n self.tree.item('suggestions', open=False, \\\n values=[self._count_children('suggestions'), ''])\n for child in self.tree.get_children('suggestions'):\n self.tree.item(child, tags='suggestions')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='suggestions')\n\n self.tree.item('unknown', open=False, \\\n values=[self._count_children('unknown'), ''])\n for child in self.tree.get_children('unknown'):\n self.tree.item(child, tags='unknown')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='unknown')\n\n self.tree.item('cldr', open=False, \\\n values=[self._count_children('cldr'), ''])\n for child in self.tree.get_children('cldr'):\n self.tree.item(child, tags='cldr')\n 
self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='cldr')\n\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def dump_iteration_tree(obj):\n def _dump_iteration_tree(obj, f, tablevel):\n if is_instance(obj, Driver):\n f.write(' ' * tablevel)\n f.write(obj.get_pathname())\n f.write('\\n')\n for comp in obj.workflow:\n if is_instance(comp, Driver) or is_instance(comp, Assembly):\n _dump_iteration_tree(comp, f, tablevel + 3)\n else:\n f.write(' ' * (tablevel + 3))\n f.write(comp.get_pathname())\n f.write('\\n')\n elif is_instance(obj, Assembly):\n f.write(' ' * tablevel)\n f.write(obj.get_pathname())\n f.write('\\n')\n _dump_iteration_tree(obj.driver, f, tablevel + 3)\n f = cStringIO.StringIO()\n _dump_iteration_tree(obj, f, 0)\n return f.getvalue()", "def generate_tree(self, max_depth = None):\n\n if max_depth is None:\n max_depth = self.tree.max_depth\n else:\n max_depth -= 1\n if max_depth == 0:\n return\n self.generate_children()\n if self.tree.remove:\n os.unlink(self.source_filename)\n for child in self.children:\n if child.count > self.tree.max_count:\n child.generate_tree(max_depth)", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def apply_tree(tree: dict, func: Callable, args: Optional[Tuple] = None, kwargs: Optional[Mapping] = None) -> None:\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n func(item, *args, **kwargs)\n explored.add(uid)\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def __make_tree(self, wd, root=\"d1\", create=True):\n d1 = \"%s/%s\" % (wd, root)\n t1 = FSTree(d1)\n d2 = \"%s/d2\" % d1\n t2 = t1.add(d2)\n if create:\n hdfs.mkdir(d2)\n for t, d, bn in ((t1, d1, \"f1\"), (t2, d2, \"f2\")):\n f = \"%s/%s\" % (d, bn)\n if create:\n hdfs.dump(self.data, f, mode=\"wb\")\n t.add(f, 0)\n return t1", "def segmental_context(save_path=conv07_outpath, data=conv07_data):\n\n save_path = os.path.join(save_path, \"segmental_context\")\n for phone in [\"phone1\", \"phone2\"]:\n attributes_names = filter(lambda x: x.startswith(phone), all_attributes) + 
[\"outcome\"]\n tree_file_name = phone + \"-all.dot\" #will also save a .png with the same name\n make_tree_from_attributes(save_path, tree_file_name, attributes_names, data=data)", "def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name = taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes", "def SaveObjects(self):\n print \"Saving objects!\"\n for type, module in self.object_modules.iteritems():\n print \"Saving objects of type: %s\" % type\n for node in self.objects[type]:\n if node.modified:\n print \"\\tSaving %s - %s\" % (type, node.name)\n node.SaveObject()", "def apply(self, tree):\n raise NotImplementedError()", "def __manage_tree(self):\n for pre, fill, node in RenderTree(self.tree):\n if node.name is 'count':\n logger.info(\n \"Tree info %s%s: %s %s p/s attack: %s\",\n pre, node.name, node.value, node.pps, node.attack)\n else:\n logger.info(\"Pre - [%s], Fill - [%s], Node - [%s]\",\n pre, fill, node.name)", "def generate_tree_postorder(node_lst, root_index):", "def create_tree():\n\tdtree = {}\n\n\tdtree['stats'] = None\n\tdtree['libs'] = {'fort':\n\t\t\t\t\t\t\t {'integers': None, 'floats': None, 'data': None}\n\t\t\t\t\t ,'cache blocking': None}\n\tdtree['grid'] = {'size' :\n\t\t\t\t\t\t\t {'nxgb': None, 'nygb': None, 'nzgb': None}\n\t\t\t\t\t ,'geom' :\n\t\t\t\t\t\t\t {'Lx' : None, 'Ly' : None, 'Lz' : None\n\t\t\t\t\t\t\t ,'dx' : None, 'dy' : None, 'dz' : None\n\t\t\t\t\t\t\t ,'x' : None, 'y' : None, 'z' : None\n\t\t\t\t\t\t\t ,'xloc': None, 'yloc': None, 'zloc': None}}\t\t\t\t\t\t\t \t\t\t\t\t \t\t \n\tdtree['eqns'] = {'qvec' : \n\t\t\t\t\t\t\t {'nvars': None, 'solved': None, 'stored': None, 'views': None}\n\t\t\t\t\t ,'coeff': None\n\t\t\t\t\t ,'time' : None\n\t\t\t\t\t ,'ndim' : None}\n\n\tdtree['misc'] = {'verbose': None, 'working precision': None}\n\tdtree['mpi'] = {'split': \n\t\t\t\t\t\t\t {'nxpr': None, 'nypr': None, 'nzpr': None}\n\t\t\t\t\t ,'dMpi' : None}\t\t \t\n\tdtree['num'] = {'hlo' : None\n\t\t\t\t\t ,'deriv': \n\t\t\t\t\t\t\t {'order': None, 'stencil': None, 'hlo': None} \n\t\t\t\t\t ,'filtr': \n\t\t\t\t \t {'order': None, 'stencil': None, 'hlo': None,'eps': None}\n\t\t\t\t\t ,'tint' : \n\t\t\t\t\t {'tstep': None, 'cfl': None, 'itn': None}}\n\n\tdtree['bc'] = {'wall': \n\t\t\t\t\t\t\t {'isoT': None, 'zeroQ': None, 'slip': None}}\t\n\n\tdtree['usr'] = None\n\tdtree['ios'] = None\n\t\n\tfrom rhsinfo import dim, stencil, order, 
coefficients, varname, varsolved, varstored, varbc, wp,hlo_rhs\n\n\tdtree['eqns']['qvec']['solved'] = []\n\tdtree['eqns']['qvec']['stored'] = []\n\tdtree['eqns']['qvec']['bcs'] = {'face':{'i' :[],'j' :[],'k' :[]},\n\t\t\t\t\t\t\t\t\t 'edge':{'ij':[],'jk':[],'ik':[]}}\n\n\tfor v in varsolved:\n\t\tdtree['eqns']['qvec']['solved'].append([v,varname[v]])\t\n\n\tfor v in varstored:\n\t\tdtree['eqns']['qvec']['stored'].append([v,varstored[v]['ind']])\n\n\tfor v in varbc:\n\t\tfor bcloc in ['face','edge']:\n\t\t\tif bcloc in varbc[v]:\n\t\t\t\tloctype = ''.join(sorted(varbc[v][bcloc].replace('1','').replace('max','')))\n\t\t\t\tdtree['eqns']['qvec']['bcs'][bcloc][loctype].append([v,varbc[v]['ind']])\n\n\n\tdtree['eqns']['coeff'] = []\n\tfor v in coefficients:\n\t\tdtree['eqns']['coeff'].append([v,coefficients[v]])\t\n\n\tdtree['eqns']['qvec']['nvars'] = len(varname)#+len(dtree['eqns']['qvec']['stored'])\t\n\tdtree['num']['deriv']['stencil'] = stencil\n\tdtree['num']['deriv']['hlo'] = hlo_rhs #int((stencil-1)/2)\n\tdtree['num']['deriv']['order'] = order\n\t\n\t# if dtree['num']['filtr']['hlo'] != None:\n\t# \tdtree['num']['hlo'] = max(dtree['num']['deriv']['hlo'],dtree['num']['filtr']['hlo'])\n\t# else:\n\t# \tdtree['num']['hlo'] = dtree['num']['deriv']['hlo']\t\n \n\tdtree['num']['hlo'] = hlo_rhs\t\t\t\n\t\n\tdtree['eqns']['ndim'] = dim\n\tdtree['misc']['working precision'] = wp\n\tdtree['misc']['verbose'] = True\n\n\t# dtree['libs']['cache blocking'] = [256,2,6] # good for 11 pts 3D, div. forme of N.S.\n\t\n\tdtree['libs']['cache blocking'] = [2560000,2,6]\n\n\t# recover BCs info:\n\t\n\ttry:\n\t from rhsinfo import bc_info\n\texcept: \n\t\tbc_info = [{},{}]\n\n\tdtree['bc']\t = {'allbc':bc_info[1],'mybc':[]} # OVERWRITE predefined 'bc' key.\n\n\treturn dtree", "def climb_tree():\n global UP_TREE\n westdesc = \"\"\n eastdesc = \"\"\n northdesc = \"\"\n southdesc = \"\"\n UP_TREE = True\n westinvalid = False\n eastinvalid = False\n northinvalid = False\n southinvalid = False\n\n\n printmessage(\"You climb the large tree to get a look at your surroundings.\", 5, MAGENTA, 2)\n\n if ZERO_BASE_PLYR_POS in range(0, 10):\n northinvalid = True\n if ZERO_BASE_PLYR_POS in range(90, 100):\n southinvalid = True\n if ZERO_BASE_PLYR_POS in range(0, 91, 10):\n eastinvalid = True\n if ZERO_BASE_PLYR_POS in range(9, 100, 10):\n westinvalid = True\n \n if not westinvalid: \n westpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 1]\n if HAS_COMPASS: \n DISCOVERED[ZERO_BASE_PLYR_POS + 1] = \"Y\"\n if westpos == 10: # Water\n westdesc = TREE_VIEWS[2]\n else:\n westdesc = TREE_VIEWS[1]\n\n westpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 1]\n if westpos == 1:\n westdesc = TREE_VIEWS[3]\n elif westpos == 2:\n westdesc = TREE_VIEWS[4]\n else:\n westdesc = TREE_VIEWS[5]\n\n if not eastinvalid:\n eastpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 1]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 1] = \"Y\"\n if eastpos == 10: # Water\n eastdesc = TREE_VIEWS[2]\n else:\n eastdesc = TREE_VIEWS[1]\n\n eastpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 1]\n if eastpos == 1:\n eastdesc = TREE_VIEWS[3]\n elif eastpos == 2:\n eastdesc = TREE_VIEWS[4]\n else:\n eastdesc = TREE_VIEWS[6]\n\n\n if not northinvalid:\n northpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 10] = \"Y\"\n if northpos == 10: # Water\n northdesc = TREE_VIEWS[2]\n else:\n northdesc = TREE_VIEWS[1]\n\n northpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 10]\n if northpos == 1: # bear\n northdesc = TREE_VIEWS[3]\n elif 
northpos == 2: # grizzly\n northdesc = TREE_VIEWS[4]\n else:\n northdesc = TREE_VIEWS[7]\n\n\n if not southinvalid:\n southpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS + 10] = \"Y\"\n if southpos == 10: # Water\n southdesc = TREE_VIEWS[2]\n else:\n southdesc = TREE_VIEWS[1]\n\n southpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 10]\n if southpos == 1: # bear\n southdesc = TREE_VIEWS[3]\n elif southpos == 2: # grizzly\n southdesc = TREE_VIEWS[4]\n else:\n southdesc = TREE_VIEWS[8]\n\n clear_messages(0)\n printmessage(\"West: \" + westdesc, 2, GREEN, 0)\n printmessage(\"East: \" + eastdesc, 3, YELLOW, 0)\n printmessage(\"North: \" + northdesc, 4, CYAN, 0)\n printmessage(\"South: \" + southdesc, 5, MAGENTA, 0)\n #show_movement(True, 10)\n update_player_on_map()\n pause_for_keypress()\n clear_messages(0)", "def _from_etree_to_tree(self, lang='en-US'):\n #clear existing tree\n# for i in self.tree.get_children():\n# self.tree.delete(i)\n self.tree.delete(*self.tree.get_children())\n #now insert old tree\n for category in self.trout:\n tagged = category.get('tags')\n if tagged is None:\n tagged = \"('{}',)\".format(category.tag)\n if tagged[-1] == ')':\n inserttext = tagged[2:3].upper() + tagged[3:tagged.find(')')-2]\n else:\n inserttext = tagged[1:2].upper() + tagged[2:-1]\n #messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(lang, inserttext))\n thiscategory = self.tree.insert('', 'end', iid=inserttext.lower(), values=['', ''], \\\n text=LOCALIZED_TEXT[lang][inserttext], tags=\"{}\".format(inserttext.lower()))\n for term in category:\n values = eval(term.get('values'))\n tags = term.get('tags')\n# messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(values, tags))\n thisterm = self.tree.insert(thiscategory, 'end')\n self.tree.item(thisterm, tags=term.get('tags'))\n self.tree.item(thisterm, text=term.text)\n self.tree.item(thisterm, values=[str(values[0]), str(values[1])])\n# tags=term.get('tags'))\n for rendering in term:\n thisrendering = self.tree.insert(thisterm, 'end', \\\n text=rendering.text, values=term.get('values'), \\\n tags=rendering.get('tags'))\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')\n self.tree.update() \n pass", "def _dfs_assign(self, filetree):\n stack = [filetree]\n while stack:\n node = stack.pop()\n if isinstance(node, tuple) and node[0][\"packmode\"] is None:\n # all children have been seen already, assing packmode\n node = node[0] # unpack the actual node\n weights = defaultdict(int)\n for child in node[\"children\"].values():\n weights[child[\"packmode\"]] += child[\"weight\"]\n packmode, weight = max(weights.items(), key=lambda x: x[1])\n node[\"weight\"] = weight\n node[\"packmode\"] = packmode\n elif node[\"children\"]:\n # schedule that node for computation\n stack.append((node,))\n # visit all children first\n for child in node[\"children\"].values():\n stack.append(child)", "def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = 
SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()", "def _from_tree_to_etree(self):\n categories = self.tree.get_children('')\n# messagebox.showwarning('_from_tree_to_etree', \\\n# 'categories={}'.format(categories))\n for category in categories:\n \n acategory = etree.SubElement(self.trout, self.tree.item(category)['text'])\n if category =='approved':\n acategory.set('tags', \"('approved',)\")\n elif category =='conflicts':\n acategory.set('tags', \"('conflicts',)\")\n elif category =='suggestions':\n acategory.set('tags', \"('suggestions',)\")\n elif category =='unknown':\n acategory.set('tags', \"('unknown',)\")\n elif category =='cldr':\n acategory.set('tags', \"('cldr',)\")\n else:\n messagebox.showerror('_from_tree_to_etree', \\\n 'unrecognised category >{}<'.format(category))\n return\n# acategory.text = self.tree.item(category)['text']\n sons = self.tree.get_children(category)\n# messagebox.showwarning('_from_tree_to_etree', \\\n# '{}, sons={}'.format(category, sons))\n for son in sons:\n ason = etree.SubElement(acategory, son)\n# ason.text = self.tree.item(son)['text']\n ason.set('values', '{}'.format(self.tree.item(son)['values']))\n ason.set('tags', '{}'.format(tuple(self.tree.item(son)['tags'])))\n grandsons = self.tree.get_children(son)\n for grandson in grandsons:\n agrandson = etree.SubElement(ason, grandson)\n agrandson.text = self.tree.item(grandson)['text']\n agrandson.set('values', \\\n '{}'.format(self.tree.item(grandson)['values']))\n agrandson.set('tags', \\\n '{}'.format(tuple(self.tree.item(grandson)['tags'])))\n# grandsons = self.tree.get_children(grandson)\n# messagebox.showwarning('','{}'.format(etree.tostring(self.trout, \\\n# encoding='unicode', \\\n# pretty_print=True)))\n# messagebox.showwarning('_from_tree_to_etree', \\\n# 'filled with {} categories'.\\\n# format([child.tag for child in self.trout]))\n return self.trout", "def buildnemxml(self):\n 
for n in sorted(self._objs.keys()):\n emanenode = self._objs[n]\n emanenode.buildnemxmlfiles(self)", "def execute(self):\n if len(self._tree) > 0:\n return self._tour(self._tree.root(),0,[]) # start the recursion", "def tree(self, st, iter):\n player = st._state['visible']['turn']\n number = st._state['visible']['number']\n #state = Pylos_copy._state['visible']\n placements = self.allplacement(st)\n upmoves = self.moveup(st)\n movements = placements + upmoves\n if iter is 3:\n self.firstmoves_func(movements)\n children = []\n if iter < 1:\n return Tree(st._state['visible'])\n self.deltas = []\n if iter == 3:\n iter -= 1\n for movement in movements:\n Pylos_copy = copy.deepcopy(st)\n number += 1\n Pylos_copy.update(movement, player)\n Pylos_copy._state['visible']['number'] = number\n self.deltas.append(self.delta_func(Pylos_copy))\n # max_indice = [number for number, delta in self.deltas if delta == max(self.deltas[1])]\n # Pylos_copy.set(movement['to'],player)\n child = self.tree(Pylos_copy, iter)\n children.append(child)\n else:\n iter -= 1\n for movement in movements:\n Pylos_copy = copy.deepcopy(st)\n Pylos_copy.update(movement, player)\n Pylos_copy._state['visible']['number'] = number\n self.deltas.append(self.delta_func(Pylos_copy))\n # max_indice = [number for number, delta in self.deltas if delta == max(self.deltas[1])]\n # Pylos_copy.set(movement['to'],player)\n child = self.tree(Pylos_copy, iter)\n children.append(child)\n return Tree(st._state['visible'], children)", "def build_tree(self, genes_share_one_alignment):\r\n species_name = self.species\r\n fun_built_tree = getattr(SSTree, species_name)\r\n return fun_built_tree(genes_share_one_alignment)", "def _build_tree(self, index):\n\n children = []\n to_string = '({0}/{1}'.format(self.tokens[index], self.labels[index])\n\n for i in range(1, len(self.tokens)):\n\n if i not in self._visited and self.heads[i] == index:\n self._visited.append(i)\n child_tree = {}\n c, s = self._build_tree(i)\n child_tree[(self.spans[i][0], self.spans[i][1], self.tokens[i], self.labels[i])] = c\n children.append(child_tree)\n to_string += ' {0}'.format(s)\n\n if len(children) > 0:\n to_string += ')'\n return children, to_string\n else:\n return children, to_string[1:]", "def tree_model(feature_train, help_rank_train, model_name):\n decision_tree = DecisionTreeClassifier()\n decision_tree = decision_tree.fit(feature_train, help_rank_train)\n tree_model = open(model_name,'wb')\n dump(decision_tree, tree_model, -1)\n return", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n    if len(rows)==0:\n        return decisionNode( )\n    newDepth = depth + 1 #Calculate the depth of the next split.\n    #Check if the depth at the next split is greater than a maximum specified depth\n    if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n        return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n    current_score=scoref(rows) #Calculate the current value of the score function.\n    # Set up some variables to track the best criteria\n    best_gain=0.0 #Initialize a value for the best gain from all possible splits\n    best_criteria=None #Initialize a variable for the best column to split on\n    best_sets=None #Initialize a variable for the best split's true and false data.\n\n    #Count the number of columns in the row, minus the results column \n    column_count=len(rows[0])-1\n    for col in range(0,column_count): #Iterate over all the columns of the data\n        #Generate the list of different values in this column\n        column_values={} #Initialize a dictionary to store the column values\n        for row in rows: \n            #Iterate over each row, adding a key in the dict for each observed value\n            column_values[row[col]]=1\n        # Divide the dataset on each value in this column.\n        for value in column_values.keys( ):\n            (set1,set2)=__divideset(rows,col,value)\n            #Calculate the fraction of data in the true branch\n            p=float(len(set1))/len(rows) \n            #Calculate the gain on the chosen score function using this split.\n            gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n            #Check if this split provides a better gain than the best previous split\n            if gain>best_gain and len(set1)>0 and len(set2)>0:\n                best_gain=gain\n                best_criteria=(col,value)\n                best_sets=(set1,set2)\n    # Recursively create the subbranches\n    if best_gain>0:\n        trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n        falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n        return decisionNode(col=best_criteria[0],value=best_criteria[1],\n                            tb=trueBranch,fb=falseBranch)\n    else:\n        return decisionNode(results=__uniqueCounts(rows))", "def build_tree(text):\n    if text[0] == \"-\":#Application\n        l = [None, None]\n        l[0], text = build_tree(text[1:])#use build_tree recursively to allow nested stuff, return text so that the next function can continue where the nested one left off.\n        l[1], text = build_tree(text[1:])\n    elif text[0] == \"*\":#Abstraction\n        l = [text[0:2], None]\n        text = text[1:]#the variable has to be removed before proceeding\n        l[1], text = build_tree(text[1:])#same as above\n        l = (l[0], l[1])\n    else:\n        l = text[0]\n    return l, text", "def makeTree(plan, initialState):\n\n\t#The initial node of the behaviour tree\n\ttree = Sequence(\"Tree\")\n\n\t#Add the routines from the black board\n\ttree.add_child(global_vars.black_board.makeRoutines())\n\n\t#The node of the plan\n\tplanTask = Sequence(\"Plan\")\n\n\t#Initialize the first place where the robot starts\n\tlastPlace = global_vars.black_board.getRobotOrigin()\n\n\t#Set all the possible tasks in the black board to be executed\n\tglobal_vars.black_board.taskDone = [False for i in range(len(plan))]\n\n\tstate = copy.deepcopy(initialState)\n\n\t#For every task in the plan...\n\tfor i in range(len(plan)):\n\t\t#If the task is the movement task\n\t\tif plan[i][0] == global_vars.black_board.movementTask:\n\t\t\tcoord = global_vars.black_board.getCoords(plan[i]\n\t\t\t\t[global_vars.black_board.destArg])\n\t\t\tif coord != False:\n\t\t\t\t#Creates a super node to hold the task\n\t\t\t\tactionTask = Sequence(\"Action \" + 
str(i+1))\n\n\t\t\t\tfunction = hop.operators[plan[i][0]]\n\n\t\t\t\t#Creates a movement task and adds it to the actionTask\n\t\t\t\t#with the corresponding setDoneTask\n\t\t\t\tactionTask.add_child(goToTask(\"MoveToTask: \" +\n\t\t\t\t\tplan[i][global_vars.black_board.destArg], coord))\n\t\t\t\tactionTask.add_child(setDoneTask(\"SetDoneTask \"+ str(i+1), i,\n\t\t\t\t\tfunction, plan[i][1:]))\n\n\t\t\t\t#Updates the robot position\n\t\t\t\tlastPlace = plan[i][2]\n\n\t\t\t\tcheckDone = checkDoneTask(\"CheckDoneTask \"+ str(i+1), i, copy.deepcopy(state))\n\t\t\t\t#Adds a node that first checks if the task has been executed,\n\t\t\t\t#and if not executes it\n\t\t\t\tplanTask.add_child(Selector(\"Task \"+ plan[i][0], [checkDone, actionTask]))\n\t\t\t\tstate = function(copy.deepcopy(state), *plan[i][1:])\n\n\n\t\t\telse:\n\t\t\t\traise ValueError(\"Place not defined in the black board\")\n\n\t\t#If not is the movement task\n\t\telse:\n\t\t\t#Request the executable task to the black board\n\t\t\ttask = global_vars.black_board.getTask(plan[i][0])\n\t\t\tif task != False:\n\n\t\t\t\t#Creates a super node to hold the task\n\t\t\t\tactionTask = Sequence(\"Action \" + str(i+1))\n\n\t\t\t\tfunction = hop.operators[plan[i][0]]\n\n\t\t\t\t#Adds the task and his setDoneTask to the actionTask\n\t\t\t\tactionTask.add_child(task)\n\t\t\t\tactionTask.add_child(setDoneTask(\"SetDoneTask \"+ str(i+1), i,\n\t\t\t\t\tfunction, plan[i][1:]))\n\n\t\t\t\t#Subroutine to check the robots position and returns to the work place\n\t\t\t\tcoords = global_vars.black_board.getCoords(lastPlace)\n\t\t\t\tif coords != False:\n\n\t\t\t\t\tcheckLocation = checkLocationTask(lastPlace)\n\t\t\t\t\tmoveToLasPositionTask = goToTask(\"MoveToTaskLastPosition: \" + lastPlace, coords)\n\n\t\t\t\t\t#The subroutine first checks the location of the robot, and then if necesary moves it\n\t\t\t\t\tNavigationTask = Selector(\"NavSubroutine\", [checkLocation, moveToLasPositionTask])\n\n\t\t\t\t\t#Creates a node with all the executable leaf nodes\n\t\t\t\t\texecTask = Sequence(\"Executable\", [NavigationTask, actionTask])\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Place not defined in the black board\")\n\n\n\t\t\t\tcheckDone = checkDoneTask(\"CheckDoneTask \"+ str(i+1), i, copy.deepcopy(state))\n\t\t\t\t#Adds a node that first checks if the task has been executed,\n\t\t\t\t#and if not executes it\n\t\t\t\tplanTask.add_child(Selector(\"Task \"+ plan[i][0], [checkDone, execTask]))\n\t\t\t\tstate = function(copy.deepcopy(state), *plan[i][1:])\n\t\t\telse:\n\t\t\t\traise ValueError(\"Task not defined in the black board\")\n\n\t#Add the plan to the tree and returns it\n\ttree.add_child(planTask)\n\tglobal_vars.black_board.setReplan(False)\n\n\treturn tree", "def savePlace(fileid,data):\n global places\n info = data.get('info')\n rels = data.get('relat')\n fn = fileid + \".xml\"\n place = etree.Element(\"place\")\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"commonname\",\"name\",\"start\",\"scue\",\"end\",\"ecue\",\"stories\",\"mention\",\"desc\",\"address\",\"loc\",\"locfile\",\"state\",\\\n\"statefile\",\"note\", \"relat\", \"aspects\", \"update\"]\n reltags = [\"related\", \"relation\", \"file\", \"rtype\", \"events\", \"cat\", \"realm\"]\n for tag in tags:\n if tag == \"relat\":\n if len(rels):\n for r in rels:\n if rels[r].get(\"related\") and rels[r].get(\"relation\") and rels[r].get(\"rtype\") and rels[r].get(\"cat\"):\n connected = etree.Element(\"relat\")\n for t in reltags:\n if 
rels[r].get(t):\n if t == \"events\":\n if len(rels[r]['events']):\n events = etree.Element(\"events\")\n elist = rels[r]['events'].keys()\n chron = sorted(elist, key = lambda x: rels[r]['events'][x].get(\"date\"))\n for e in chron:\n mstone = etree.Element(\"mstone\")\n etree.SubElement(mstone,\"date\").text = rels[r]['events'][e].get(\"date\",(\"\",False))[0]\n etree.SubElement(mstone,\"event\").text = rels[r]['events'][e].get(\"event\",(\"\",False))[0]\n if rels[r]['events'][e].get(\"Type\"):\n etree.SubElement(mstone,\"type\").text = rels[r]['events'][e].get(\"type\")[0]\n events.append(mstone)\n connected.append(events)\n else:\n value = rels[r].get(t)\n if value is None: value = ['',False]\n etree.SubElement(connected,t).text = value[0]\n place.append(connected)\n else:\n print \"A required tag is missing from relation %s.\" % r\n else:\n print \"no relations found\"\n elif tag == \"note\":\n if info.get(tag):\n for i in range(len(info[tag])):\n note = etree.Element(tag)\n di = info[tag].get(str(i))\n if di:\n value = di.get(\"content\")\n if value is not None:\n etree.SubElement(note,\"content\").text = value[0]\n value = di.get(\"date\")\n if value is not None:\n etree.SubElement(note,\"date\").text = value[0]\n place.append(note)\n else:\n print \"no notes\"\n# 820 #\n elif tag == \"aspects\":\n nodes = info.get(\"aspects\")\n if nodes is not None:\n aspects = etree.Element(\"aspects\")\n for node in sorted(nodes.keys()):\n value = nodes[node]\n if value is None: value = ['',False]\n etree.SubElement(aspects,\"text\").text = value[0]\n place.append( aspects )\n else:\n print \"no aspects found\"\n\n elif tag == \"update\":\n etree.SubElement(place,tag).text = common.skrTimeStamp(config['datestyle'])\n else:\n value = info.get(tag)\n if value is None: value = ['',False]\n etree.SubElement(place,tag).text = value[0]\n saveXMLtree(place,\"place\",fileid)\n places[fileid]['changed'] = False\n return True", "def work_tree2(obj, **kwargs):\n if 'exclusions' in kwargs:\n exclusions = kwargs['exclusions']\n else:\n exclusions = Exclusions([], [], [])\n #groups_done = {}\n classes = NodeResults(nodetype='classes')\n params = NodeResults(nodetype='params')\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n # loop opts\n index_pop = to_index.pop\n index_extend = to_index.extend\n egroups, eclasses, eparams = exclusions\n add_classes = classes.add_entries\n add_params = params.add_entries\n\n while to_index:\n (obj, depth) = index_pop()\n #objname = obj.name\n #if objname in groups_done and groups_done[objname] <= depth:\n #continue\n try:\n objclasses = obj.classes.exclude(classname__in=eclasses)\n add_classes(objclasses, \"classname\", \"classparams\", depth)\n objparams = obj.parameters.exclude(paramkey__in=eparams)\n add_params(objparams, \"paramkey\", \"paramvalue\", depth)\n except RuntimeError, e:\n return (\"Fail\", \"Fail\") # or just let it bubble up to the caller\n\n #groups_done[objname] = depth\n depth += 1\n children = [(group, depth) for group in obj.groups.exclude(name__in=egroups)]\n index_extend(children)\n\n return classes.as_dict(), params.as_dict() # or (classes.entries, params.entries)", "def _initialize_trees(self):", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def build_tree(self, root, calc_type):\n self.calcTree.DeleteAllItems()\n r = len(root.split(os.sep))\n ids = {root: self.calcTree.AddRoot(root)}\n for (dir_path, dir_names, file_names) in os.walk(root):\n if 
interface.isCalcOfType(calc_type, dn=dir_names, fn=file_names):\n # find the number of steps in MDE file, quickly\n nsteps = interface.GetNumMDESteps(dir_path)\n ancdirs = dir_path.split(os.sep)[r:]\n if nsteps is not None:\n ancdirs[-1] += ' [%i]' % nsteps\n ad = root\n for ancdir in ancdirs:\n d = os.path.join(ad, ancdir)\n if not d in ids:\n ids[d] = self.calcTree.AppendItem(ids[ad], ancdir)\n self.calcTree.SortChildren(ids[ad])\n ad = d", "def gen_tree(path):\n # print(\"CALLING.. Tree\")\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.parse(path, parser)\n root = tree.getroot() \n return root, tree", "def buildTree(data, level):\n \n node = maxIG(data)\n subsets = splitBy(data, node[0])\n header = [\"Outlook\", \"Temp\", \"Humidity\", \"Wind\", \"Play\"]\n \n if node[1] == 0:\n print(\"\\t\" * level, level, getColumn(data, node[0])[0], \":\", getColumn(data, -1)[0]) \n elif level < 4:\n print(\"\\t\" * level, level, getColumn(data, level - 1)[0], \"->\", header[node[0]]) \n rec = [buildTree(subset, level + 1) for subset in subsets]\n else:\n print(\"\\t\" * level, level, getColumn(data, level - 1)[0], \":\", getColumn(data, -1))", "def redoTuple( fileRootName, treeName, categoryList, weightsDict, PT, ETA ):\n\t# Dictionary to store the names of the branches\n\tvalueName = {}\n\t# Dictionary to store the struct C++ objects pythonized\n\ttheW = {}\n\tfor category in categoryList:\n\t\tname = 'weight_'+category.replace(':','_')\n\t\tvalueName[category] = name\n\t\tcode = 'struct W_'+category.replace(':','_')+'{ double '+name+'; };'\n\t\tstructName = 'W_'+category.replace(':','_')\n\t\t# Creation of the C++ struct in ROOT\n\t\tROOT.gROOT.ProcessLine( code )\n\t\t# Importing the C++ struct and initializing\n\t\t_tmp = __import__('ROOT',fromlist=[structName])\n\t\t_toInit = _tmp.__getattr__( structName )\n\t\ttheW[category] = eval( '_toInit()' )\n\n\tcounter = 0\n\tcount2Print = '\\033[1;34mRe-doing NTuple %s for tree %s: %s%s\\033[1;m' % ( fileRootName, treeName.split('/')[0], str(int(counter)).zfill(2),'%' )\n\tsys.stdout.write( count2Print )\n\n\tfileRoot = ROOT.TFile( fileRootName,'UPDATE' )\n\t# FIXME: Check if is in there the root file\n\tt = fileRoot.Get( treeName )\n\t# Get all the branches: to extract the variables \n\tvar = dict( [ (i.GetName(),t.GetLeaf(i.GetName())) for i in t.GetListOfBranches() ] )\n\n\tcount = 0\n\tlastCounter = 0\n\ttheBranches = {}\n\tfor category,_branchName in valueName.iteritems():\n\t\ttheBranches[ _branchName ] = t.Branch( _branchName, theW[category], _branchName+'/D' )\n\tnumEntries = t.GetEntries()\n\tfor i in xrange(numEntries):\n\t\tdumm = t.GetEntry(i)\n\t\tpt = var[PT].GetValue()\n\t\teta = var[ETA].GetValue()\n\t\tfor category, weights in weightsDict.iteritems():\n\t\t\tfor name,(etaBins, ptBins, histo) in weights.iteritems():\n\t\t\t\tif eta >= etaBins[0] and eta < etaBins[1] and pt >= ptBins[0] and pt < ptBins[1]:\t\n\t\t\t\t\tbin = histo.FindBin( pt )\n\t\t\t\t\ttheW[category].__setattr__( valueName[category], histo.GetBinContent(bin) )\n\t\t\t\t\tbreak\n\t\tcounter\t= int(float(count)/numEntries*100)\n\t\tif counter % 10 != lastCounter:\n\t\t\tsys.stdout.write( '\\033[1;33m \\b\\b\\b\\b'+str(counter).zfill(2)+'%\\033[1;m' )\n\t\t\tsys.stdout.flush()\n\t\t\tlastCounter = counter % 10\n\t\tfor theBranch in theBranches.itervalues():\n\t\t\tdumm = theBranch.Fill()\n\t\tcount += 1 \n\tsys.stdout.write( '\\033[1;33m \\b\\b\\b\\b'+str(100).zfill(2)+'%\\n\\033[1;m' )\n\tsys.stdout.flush()\n\t\n\t_dir = 
fileRoot.Get(treeName.split('/')[0])\n\t_dir.cd()\n\tt.Write('',ROOT.TObject.kOverwrite)\n\tfileRoot.Close()", "def saveState(fileid,data):\n global states\n info = data.get('info')\n fn = fileid + \".xml\"\n state = etree.Element(\"state\")\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"start\",\"scue\",\"end\",\"ecue\",\"vital\",\"polit\",\"culture\",\"history\", \"geography\",\"econ\",\"demo\",\"events\",\"cities\",\"aspects\",\"update\"]\n for tag in tags:\n if tag == \"cities\":\n nodes = info.get(\"cities\")\n if nodes is not None:\n for node in nodes.keys():\n if nodes[node].get(\"name\"):\n connected = etree.Element(\"city\")\n value = info['cities'][node].get(\"name\")\n if value is None: value = ['',False]\n etree.SubElement(connected,\"name\").text = value[0]\n value = node\n if value is None: value = ''\n etree.SubElement(connected,\"file\").text = value\n value = info['cities'][node].get(\"note\")\n if value is not None and len(value[0]) > 0: etree.SubElement(connected,\"note\").text = value[0]\n state.append(connected)\n else:\n print \"A required tag is missing from city %s.\" % node\n else:\n print \"no cities found\"\n elif tag == \"events\":\n nodes = info.get(\"m\")\n nodes = nodes.get(\"events\")\n if nodes is not None:\n events = etree.Element(\"events\")\n for node in nodes.keys():\n if nodes[node].get(\"event\"):\n connected = etree.Element(\"mstone\")\n value = info['m']['events'][node].get(\"event\")\n if value is None: value = ['',False]\n etree.SubElement(connected,\"event\").text = value[0]\n value = info['m']['events'][node].get(\"date\")\n if value is None: value = ['',False]\n etree.SubElement(connected,\"date\").text = value[0]\n events.append(connected)\n else:\n print \"A required tag is missing from event %s.\" % node\n state.append(events)\n else:\n print \"no events found\"\n# 820 #\n elif tag == \"aspects\":\n nodes = info.get(\"aspects\")\n if nodes is not None:\n aspects = etree.Element(\"aspects\")\n for node in sorted(nodes.keys()):\n value = nodes[node]\n if value is None: value = ['',False]\n etree.SubElement(aspects,\"text\").text = value[0]\n state.append( aspects )\n else:\n print \"no aspects found\"\n\n elif tag == \"update\":\n etree.SubElement(state,tag).text = common.skrTimeStamp(config['datestyle'])\n else:\n value = info.get(tag)\n if value is None: value = ['',False]\n etree.SubElement(state,tag).text = value[0]\n r = saveXMLtree(state,\"state\",fileid)\n if r:\n try:\n states[fileid]['changed'] = False\n except KeyError:\n printPretty(states)\n return r", "def traverse_tree(tree, thisFolder, path, submission):\n\n # Get files directly underneath this folder.\n blobs = tree.blobs\n thisFolderName = tree.name\n\n # Add this folder to the path.\n path = os.path.join(path, thisFolderName)\n print(path)\n\n for blob in blobs:\n filepath = os.path.join(path, blob.name)\n add_source_file(blob.name, thisFolder, filepath, submission)\n\n # Get folders directly underneath this folder.\n folders = tree.trees\n for folder in folders:\n srcFolderObj = add_source_folder(folder.name, thisFolder)[0]\n traverse_tree(folder, srcFolderObj, path, submission)\n\n return", "def return_tree(self):\n\n return self.tree, self.ParentMap", "def build_tree(self):\n active = self.get_active()\n family = self.dbstate.db.get_family_from_handle(active)\n self.goto_handle(handle=family)", "def dump_tree(self, mainbranch: bool=True) -> List:\n data = []\n\n tovisit = [self.root]\n while len(tovisit) > 
0:\n node = tovisit.pop(0)\n if not node.isleaf():\n if not mainbranch or node._mainbranch:\n # add node info to record\n tovisit.extend(node.children())\n\n # extract edge data\n cmds_node = [e.cmd for e in node.edges]\n counts_node = [e.search_outcome for e in node.edges]\n\n # add record to data\n feedback_history = node.feedback_history()\n inputs = dict()\n inputs['cmdlist'] = node.inputs['cmdlist']\n inputs['ents2id'] = {k: i for k, i in node.inputs['ents2id'].items()}\n inputs['memory_input'] = node.inputs['memory_input'].numpy().tolist()\n inputs['location_input'] = node.inputs['location_input'].numpy().tolist()\n inputs['cmdlist_input'] = node.inputs['cmdlist_input'].numpy().tolist()\n inputs['entvocab_input'] = node.inputs['entvocab_input'].numpy().tolist()\n inputs['cmdprev_input'] = node.inputs['cmdprev_input'].numpy().tolist()\n \n record = {\n \"cmdlist\": cmds_node,\n \"inputs\": inputs,\n \"nwoutput\": node.nwoutput,\n \"counts\": counts_node,\n \"value\": node.reward,\n \"feedback_history\": feedback_history,\n \"feedback_meta\": node.feedback,\n \"level\": node.level(),\n \"mainbranch\": node._mainbranch}\n data.append(record)\n return data", "def TreeInit(tree):\n \"\"\" Settings/NI_6133 \"\"\"\n tree.addNode('.SETTINGS')\n tree.addNode('.SETTINGS.EXPERIMENT')\n tree.addNode('.SETTINGS.NI')\n tree.addNode('.SETTINGS.NI.NI_6602_TIME')\n tree.addNode('.SETTINGS.NI.NI_6133')\n tree.addNode('.NI_6133')\n tree.addNode('.NI_FPGA')\n tree.addNode('.SETTINGS.NI.NI_6133_DIO')\n tree.addNode('.TEK_2024B')\n tree.addNode('.TEK_2024B.TEK')\n tree.addNode('.TEK_2024B.TEK1')\n tree.addNode('.PIMAX3')\n tree.addNode('.PIMAX3.RAW')\n tree.addNode('.PIMAX3.CAM_SETTING')\n \"\"\" Single-valued member nodes \"\"\"\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_DATE','TEXT',\n 'SHOTDATEANDTIME')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NOTES','TEXT','SHOTNOTES')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SYS_MESSAGE','TEXT','SYSMESSAGE')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_QUALITY','TEXT',\n 'SHOTQUALITY')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NUMBER','TEXT',\n 'SHOTNUMBER')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:PROG_VERSION','TEXT',\n 'PROGRAM_VERSION')\n AddNodeWithTag(tree, '.TEK_2024B.TEK:RAW', 'TEXT', 'RAWTEKSCOPE')\n AddNodeWithTag(tree, '.TEK_2024B.TEK1:RAW', 'TEXT', 'RAWTEKSCOPE1')", "def save(*args):\r\n\r\n try:\r\n global bambara_bfe\r\n global francais_bfe\r\n global english_bfe\r\n global bambara_bf\r\n global francais_bf\r\n global bambara_be\r\n global english_be\r\n\r\n # if len(bambara_bfe) > 0 and len(bambara_bf) > 0 and len(bambara_be) > 0:\r\n # pass\r\n\r\n if len(bambara_bfe) > 0:\r\n with open(\"bambara_bfe.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in bambara_bfe:\r\n f.write(\"%s\\n\" % item)\r\n bambara_bfe = list()\r\n\r\n with open(\"francais_bfe.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in francais_bfe:\r\n f.write(\"%s\\n\" % item)\r\n francais_bfe = list()\r\n\r\n with open(\"english_bfe.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in english_bfe:\r\n f.write(\"%s\\n\" % item)\r\n english_bfe = list()\r\n\r\n if len(bambara_bf) > 0:\r\n with open(\"bambara_bf.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in bambara_bf:\r\n f.write(\"%s\\n\" % item)\r\n bambara_bf = list()\r\n\r\n with open(\"francais_bf.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in francais_bf:\r\n f.write(\"%s\\n\" % item)\r\n francais_bf = list()\r\n\r\n if len(bambara_be) > 0:\r\n with 
open(\"bambara_be.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in bambara_be:\r\n f.write(\"%s\\n\" % item)\r\n bambara_be = list()\r\n\r\n with open(\"english_be.txt\", \"w\", encoding=\"utf-8\") as f:\r\n for item in english_be:\r\n f.write(\"%s\\n\" % item)\r\n english_be = list()\r\n\r\n except ValueError:\r\n pass", "def create_tree(self, tree):\n # print(self)\n if len(self.available_combinations()) > 1:\n comb1 = random.choice(self.available_combinations())\n comb2 = random.choice(self.available_combinations())\n\n if self.last_move == 5:\n next_move = 7\n else:\n next_move = 5\n\n # print(next_move)\n\n board1 = copy.deepcopy(self)\n board2 = copy.deepcopy(self)\n\n board1.board[comb1[0]][comb1[1]] = next_move\n board1.last_move = 7\n tree.insert_left(board1)\n board2.board[comb2[0]][comb2[1]] = next_move\n board2.last_move = 7\n tree.insert_right(board2)\n\n board1.create_tree(tree.get_left_child())\n board2.create_tree(tree.get_left_child())", "def __genAttributesROOT( self, file, indent = ' ' ):\r\n # Go through all actively tracked ASG's (formalisms)\r\n for modelMetaName in self.__trackASG.keys():\r\n if( not self.__isASGbyNameEmpty( modelMetaName ) ):\r\n # This formalism has at least one entity, we NEED IT\r\n ASG = self.getASGbyName( modelMetaName )\r\n \r\n #todo: clean this up\r\n try:\r\n modelTracker = self.__trackAttributes[ modelMetaName ]\r\n except:\r\n print 'ERROR: see ASG.py in __genAttributesROOT(), caused by Buttons Model pointing to a Formalism with a different name'\r\n print 'modelMetaName', modelMetaName \r\n print 'self.__trackAttributes', self.__trackAttributes\r\n print 'self.__trackASG', self.__trackASG\r\n \r\n file.write( '\\n'+indent+'# --- Generating attributes code for ASG '\r\n +modelMetaName+' ---\\n' ) \r\n # DOH! 
Some formalisms may not have any attributes at all!\r\n if( len( modelTracker ) > 0 ):\r\n file.write( indent + 'if( ' + modelMetaName + 'RootNode ): ' )\r\n i2 = indent + ' '\r\n \r\n #Generates code for the attributes value\r\n for attr in modelTracker.keys():\r\n #print attr, ASG.getAttrValue(attr)\r\n file.write( '\\n'+i2+'# '+attr+'\\n' )\r\n \r\n # Write its value to file\r\n ASG.getAttrValue(attr).writeValue2File ( file = file, indent=i2, \r\n objName = modelMetaName+'RootNode.'+attr, depth = 1,\r\n generatingCode = 0) \r\n else: \r\n file.write( indent+'# No attributes to generate!\\n' )\r\n file.write( indent+'# --- ASG attributes over ---\\n\\n' )", "def construct_tree(self, parent_name:str):\n #print()\n #print(\"Constructing tree1:\",self, len(self))\n\n # helper function\n def construct_tree_subset(pointer, end_pointer=None, parent_name:str=\"\"):\n \n # subhelper function\n def error_string(pointer, end):\n value_str = \"'\"+str(pointer.value)+\"'\"\n full_str = \"\"\n pointer = pointer.next\n cont = True\n while cont:\n if pointer == end:\n value_str = full_str\n \n if pointer == None:\n cont = False\n else:\n full_str += \" \" + \"'\" + str(pointer.value) + \"'\"\n pointer = pointer.next\n return value_str, full_str\n \n \n if pointer == None:\n raise TypeError(\"\\nError parsing Maths String: {0}\\nGiven None pointer.\".format(parent_name))\n \n elif pointer.next == end_pointer:\n val = pointer.value\n if isinstance(val, MathList):\n sub_output = val.construct_tree(parent_name)\n if len(sub_output) == 1:\n return sub_output[0]\n else:\n return sub_output\n else:\n return pointer.value\n\n elif pointer.next != None:\n if pointer.next.next == end_pointer:\n #unary operation\n left = None\n right = None\n \n if isinstance(pointer.value, str):\n left = pointer.value\n else:\n left = construct_tree_subset(pointer, pointer.next, parent_name)\n if isinstance(pointer.next.value, str):\n right = pointer.next.value\n else:\n right = construct_tree_subset(pointer.next, end_pointer, parent_name)\n \n return [left, right]\n \n \"\"\"\n # see if the first pointer is a lefthand function.\n if isinstance(pointer.value, str):\n if maths_1left_func(pointer.value):\n right = construct_tree_subset(pointer.next, end_pointer, parent_name)\n #print(\"here\", pointer.value, right)\n return [pointer.value, right]\n \"\"\"\n \n # otherwise triple or more,\n\n output = [None]*3\n\n first_eq = None\n first_pow = None\n first_mult = None\n first_add = None\n \n sign = False\n\n left_elem = False\n\n cont = True\n p = pointer\n last_p = p\n while cont:\n if p == end_pointer:\n cont = False\n else:\n value = p.value\n if value == '^':\n left_elem = False\n if first_pow == None:\n first_pow = p\n left_elem = False\n\n elif value == '*' or value == '/':\n left_elem = False\n if first_mult == None:\n first_mult = p\n left_elem = False\n\n elif value == '+' or value == '-':\n if sign and left_elem:\n # sign ==> form of -x, hence if x-y form take that over -x\n first_add = p\n sign = False\n \n elif first_add == None:\n first_add = p\n if left_elem:\n pass\n else:\n sign = True\n left_elem = False\n \n elif value == '<' or value == '>' or value == '<=' or value == '>=':\n if first_eq == None:\n first_eq = p\n else:\n value_str, full_str = error_string(pointer, end_pointer)\n raise SyntaxError(\"\\nStack Trace: {0}\\nMultiple inequalities is unsupported.\\n Section: '{1}'\\n Full: '{2}'\"\\\n .format(parent_name, value_str, full_str))\n left_elem = False\n \n else:\n left_elem = True\n last_p = p\n p = 
p.next\n \n func_p = None\n double_params = True\n \n if first_eq != None:\n func_p = first_eq\n elif first_add != None and not sign:\n func_p = first_add\n elif first_mult != None:\n func_p = first_mult\n elif first_pow != None:\n func_p = first_pow\n\n elif first_add and sign:\n #print(\"here\", first_add.next.value, first_add.next.next.value)\n right = construct_tree_subset(first_add.next, end_pointer, parent_name)\n return [first_add.value, right]\n else:\n # get string representation of values\n value_str, full_str = error_string(pointer, end_pointer)\n \n raise ValueError(\"\\nStack Trace: {0}\\nMultiple parameters in parse string, however no valid function.\\n Section: \\\"{1}\\\"\\n Full: \\\"{2}\\\"\"\\\n .format(parent_name, value_str, full_str))\n\n\n #### format tree\n\n output[0] = construct_tree_subset(pointer, func_p, parent_name)\n output[2] = construct_tree_subset(func_p.next, end_pointer, parent_name)\n output[1] = func_p.value\n\n return output\n \n output = construct_tree_subset(self.head, None, parent_name)\n if isinstance(output, list):\n pass # in desired output\n else:\n output = [output]\n return output", "def write_tree(tree, tasks, level=0, pfwid=None, operator=None):\n dtm = datetime.timedelta(hours=4)\n #base = \" \" * level\n line = None\n host = None\n status = 0\n for tid, val in tree.iteritems():\n if level == 0 and tasks[tid]['length'] < dtm:\n return None, None, None\n if level == 0 and tasks[tid]['status'] is not None:\n return None, None, None\n host = tasks[tid]['exec_host']\n if 'exec' in tasks[tid]['name']:\n execl = \"%s (%s)\" % (tasks[tid]['name'], tasks[tid]['label'])\n else:\n execl = tasks[tid]['name']\n if tasks[tid]['length'] is not None:\n if tasks[tid]['length'] > dtm and tasks[tid]['name'] not in ['attempt', 'block', 'job']:\n status = 1\n if tasks[tid]['status'] is not None:\n if int(tasks[tid]['status']) != 0:\n status = 2\n# line.append(\"%s%i %30s %27s %23s %5s\" % (base, tasks[tid]['id'], execl, tasks[tid]['start_time'], tasks[tid]['length'], str(tasks[tid]['status'])))\n line = Task(tasks[tid]['id'], execl, tasks[tid]['start_time'], tasks[tid]['length'], tasks[tid]['status'], tasks[tid]['exec_host'], level, pfwid, operator)\n temp, stat, lns = write_tree(val, tasks, level+1)\n if temp is not None:\n host = temp\n line.add_child(lns)\n status = max(stat, status)\n return host, status, line", "def gentree(self, symbol): \n ### YOUR CODE HERE\n tree = \"(\" + symbol + \" \"\n expansion = self.random_expansion(symbol)\n for s in expansion:\n if self.is_terminal(s):\n tree += \" \" + s\n else:\n tree += \" \" + self.gentree(s)\n tree += \")\"\n ### END YOUR CODE\n return tree", "def _build_file_tree(self):\n # Build file tree with packmode and weigth info (# of file in the packmode)\n root = {\"packmode\": None, \"weight\": None, \"children\": {}}\n for filepath, packmode in self.override_packmode_map.items():\n node = root\n for part in filepath:\n node = node[\"children\"].setdefault(\n part, {\"packmode\": None, \"weight\": None, \"children\": {}}\n )\n node[\"weight\"] = 1\n node[\"packmode\"] = packmode\n return root", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero 
branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def expansion(self, actions):\n for action in actions: \n self.children[action[0]] = TreeNode()", "def repr_tree(tree, viz, current_node, rec_depth, color_map, parameters):\r\n for child in tree.children:\r\n if child.operator is None:\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if child.label is None:\r\n viz.node(this_trans_id, \"tau\", style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(child, color_map)\r\n viz.node(this_trans_id, str(child), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n condition_wo_operator = child.operator == pt_operator.Operator.XOR and len(\r\n child.children) == 1 and child.children[0].operator is None\r\n if condition_wo_operator:\r\n childchild = child.children[0]\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if childchild.label is None:\r\n viz.node(this_trans_id, str(childchild), style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(childchild, color_map)\r\n viz.node(this_trans_id, str(childchild), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n viz.attr('node', shape='circle', fixedsize='true', width=\"0.6\",\r\n fontsize=\"14\")\r\n op_node_identifier = str(uuid.uuid4())\r\n node_color = get_color(child, color_map)\r\n viz.node(op_node_identifier, str(child.operator), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, op_node_identifier)\r\n viz = repr_tree(child, viz, op_node_identifier, rec_depth + 1, color_map, parameters)\r\n return viz", "def generate_tree(tree, out_file='tree'):\n activities = [\n 'Working at Computer',\n 'Standing Up, Walking and Going up/down stairs',\n 'Standing',\n 'Walking',\n 'Going Up/Down Stairs',\n 'Walking and Talking with Someone',\n 'Talking while Standing',\n ]\n file_name = path.join('..', 'proposal', out_file)\n export_graphviz(tree, out_file=\"{}.dot\".format(file_name), class_names=activities, rounded=True)\n # system(\"dot -Tpng {0}.dot -o {0}.png\".format(out_file))", "def write_newtree(node,text):\r\n if node.left!=None: # if the left node is not equal to none\r\n text.write(\"Question:\\n\") # write the 
question\r\n text.write(node.data) # write the data\r\n write_newtree(node.left,text) # recursivly call to insert question to the left node\r\n write_newtree(node.right,text) # recursivly call to insert question to the right node\r\n return node # updating node\r\n\r\n else: # if the left node is equal to none\r\n text.write(\"Guess:\\n\") # write the guess\r\n text.write(node.data) # write the data\r\n return node # updating node\r", "def walk(self, funcs):\n if self.use_album:\n targetName = \"AlbumName\"\n albums = [a for a in self.albums if\n a.get(\"Album Type\", None) == \"Regular\"]\n else:\n targetName = \"RollName\"\n albums = self.albums\n i = 0\n for folder in albums:\n i += 1\n if self.use_album:\n folderDate = None\n else:\n folderDate = self.appleDate(folder[\"RollDateAsTimerInterval\"])\n images = folder[\"KeyList\"]\n\n folderName = folder[targetName]\n\n #as we process albums/events in the iPhoto library, remove that album\n #from the list of import_albums we'll be importing at the end\n if self.import_albums:\n for ia in self.import_albums:\n for album_name in ia['album_names']:\n album_name = unicode(album_name, 'utf-8')\n if folderName == album_name:\n self.import_albums.remove(ia)\n\n if folderDate and self.use_date:\n date = '%(year)d%(delim)s%(month)02d%(delim)s%(day)02d' % {\n 'year': folderDate.year,\n 'month': folderDate.month,\n 'day': folderDate.day,\n 'delim': self.date_delimiter\n }\n if re.match(\"[A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}\", folderName):\n outputPath = date\n elif re.match(\"[0-9]{4}.[0-9]{2}.[0-9]{2} ?.*\", folderName):\n outputPath = folderName\n else:\n outputPath = date + \" \" + folderName\n if self.year_dir:\n outputPath = os.path.join(str(folderDate.year), outputPath)\n else:\n outputPath = folderName\n\n # Deconflict output directories\n targetFileDir = os.path.join(self.dest_dir, outputPath)\n if self.deconflict:\n j = 1\n while targetFileDir in self.output_dirs:\n targetFileDir = os.path.join(self.dest_dir, outputPath + \" %02d\"%j)\n j += 1\n self.output_dirs.add(targetFileDir)\n\n self.status(\"* Processing %i of %i: %s (%i images)...\\n\" % (\n i,\n len(albums),\n folderName,\n len(images)\n ))\n for imageId in images:\n for func in funcs:\n func(imageId, targetFileDir, folderDate)\n self.status(\"\\n\")\n\n if self.import_missing: \n self.status(\"importing folders:\\n\")\n for ia in self.import_albums:\n self.status(ia[\"album_dir\"] + \"\\n\")\n\n #using the \"Auto Import\" dir in iPhoto was unpredictable with respect to the resulting event name.\n #Using AppleScript to import the event, seams to always result in the event being properly named\n if not self.test:\n #There is probably a better way to do this. 
I noticed I had an album with an ' in it that errored...\n escaped_dir = ia[\"album_dir\"].replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"')\n os.system('''osascript -e '\ntell application \"iPhoto\"\n import from \"%s\"\nend tell\n' ''' % escaped_dir)", "def __create_dir_structure_file__(self):\n # | - __create_dir_structure_file__\n\n dir_structure_data = {}\n dir_structure_data[\"tree_level_labels\"] = self.tree_level_labels\n dir_structure_data[\"level_entries_dict\"] = self.level_entries_list\n # TEMP\n dir_structure_data[\"skip_dirs\"] = self.skip_dirs_lst\n\n fle_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/dir_structure.json\",\n )\n\n with open(fle_name, \"w\") as fle:\n json.dump(dir_structure_data, fle, indent=2)\n # __|", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def optimized_work_tree(obj, **kwargs):\n exclusions = kwargs.get('exclusions', {\"groups\": [], \"classes\": [], \"params\": []})\n groups_done = {}\n classes = {\"depths\": {}, \"content\": {}}\n params = {\"depths\": {}, \"content\": {}}\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n index_pop = to_index.pop\n index_extend = to_index.extend\n while to_index:\n (obj, depth) = index_pop()\n objname = obj.name\n if objname in groups_done and groups_done[objname] <= depth:\n continue\n\n objclasses = obj.classes.exclude(classname__in=exclusions['classes'])\n updated_classes = optimized_update_values(objclasses, \"classname\", \"classparams\", depth=depth, results=classes)\n\n objparams = obj.parameters.exclude(paramkey__in=exclusions['params'])\n updated_params = optimized_update_values(objparams, \"paramkey\", \"paramvalue\", depth=depth, results=params)\n\n if not updated_classes or not updated_params:\n return (\"Fail\", \"Fail\")\n\n groups_done[objname] = depth\n depth += 1\n children = ((group, depth) for group in obj.groups.exclude(name__in=exclusions['groups']))\n index_extend(children)\n\n params['content']['done_count'] = len(groups_done)\n return (classes[\"content\"], params[\"content\"])", "def saveTrees(self, trees, path=None, create=True, overwrite=True, return_path=False):\n\n path = self.get('save_path', path)\n\n if not path.endswith('/'):\n path += '/'\n\n if path is None:\n raise IOError('No path given for saving trees')\n\n # paths can reference things in the config object, so use string replacement now\n path = path % self\n path = self._getAbsolutePath(path)\n\n # make sure the path exists, and if it doesn't and we are supposed to,\n # create it\n if not os.path.exists(path):\n if create:\n os.makedirs(path)\n else:\n raise IOError('Path not found: %s' % path)\n\n # the format for the file name takes into account several options such as:\n # * underlabel\n # * num_trees\n # and customizes the file names based upon the existance (and value) of them\n if hasattr(self, 'underlabel'):\n pattern = 
'run=%(run)s_numtrees=%(num_trees)s_samples=%(num_samples)s_underval=%(underval)s_underlabel=%(underlabel)s_pvalue=%(pvalue)s_depth=%(max_depth)s_distinctions=%(distinctions)s_stat=%(split_stat)s.pkl'\n\n elif hasattr(self, 'num_trees'):\n pattern = 'run=%(run)s_numtrees=%(num_trees)s_samples=%(num_samples)s_depth=%(max_depth)s_distinctions=%(distinctions)s_stat=%(split_stat)s.pkl'\n else:\n pattern = 'run=%(run)s_samples=%(num_samples)s_depth=%(max_depth)s_distinctions=%(distinctions)s_stat=%(split_stat)s.pkl'\n\n filename = os.path.join(path, pattern % self)\n\n # if we are just returning the path and not saving it\n if return_path:\n return filename\n\n if os.path.exists(filename) and not overwrite:\n raise RuntimeError('File exists and overwrite is False: %s' % filename)\n\n import pickle\n try:\n l = len(trees)\n print('Saving %s tree(s) to: %s' % (l, filename))\n except:\n print('Saving tree to: %s' % (filename))\n\n pickle.dump(trees, open(filename, 'wb'), pickle.HIGHEST_PROTOCOL)\n f_stat = os.stat(filename)\n print('\\tsize:', formatBytes(f_stat.st_size))", "def main():\n # If you change this, use an odd number.\n SIZE = 21\n print(makeTree(SIZE))", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def Build_FT(filename):\n\ttree = ET.parse(filename)\n\troot = tree.getroot()\n\tglobal dict\n\tglobal leaves\n\tglobal nonleaves\n\tfor n in root:\n\t\tname=n.attrib['id']\n\t\t#nonleaves.append(name)\n\t\t\n\t\tif name not in dict:\n\t\t\tdict[name]=[]\n\t\tfor i in range(len(n)):\n\t\t\tif i==0:\n\t\t\t\tif n[i].text.upper()==\"AND\":\n\t\t\t\t\tdict[name].append(1)\n\t\t\t\telse:\n\t\t\t\t\tdict[name].append(0)\n\t\t\telse:\n\t\t\t\tdict[name].append(n[i].text)\t\t\t\t\n\t\t\t\tif n[i].text not in dict:\t# Also create entry for kid nodes\t\t\t\t\t\t\n\t\t\t\t\tdict[n[i].text]=[]\n\t\t\t\t\t\n\t\"\"\"Test whether a node is leaf node, if yes add it to list leaves\"\"\"\n\tfor i in dict:\n\t\tdict_all[i]=-1\n\t\tif not dict[i]:\n\t\t\tleaves.append(i)", "def build_tree(elem, level = 1024, remove_root = 0):\n if level <= 0:\n return None\n level -= 1\n\n lista = elem.objectValues()\n node = {}\n children = []\n\n for i in lista:\n result = 
(build_tree(i, level))\n if result:\n children.append(result)\n\n if remove_root:\n return children\n else:\n node[\"title\"] = get_id(elem)\n node[\"children\"] = []\n\n if len(lista):\n node[\"key\"] = get_id(elem)\n node[\"isFolder\"] = True\n\n if not len(node[\"children\"]):\n node[\"isLazy\"] = True\n\n node[\"children\"] = children\n\n return node" ]
[ "0.635694", "0.63468206", "0.6294683", "0.6240964", "0.6053419", "0.59460986", "0.59322184", "0.58918864", "0.5855702", "0.581498", "0.57331216", "0.56759137", "0.56216335", "0.5617605", "0.5604815", "0.56032133", "0.56020284", "0.559628", "0.5595393", "0.5594104", "0.55520797", "0.55433875", "0.55249465", "0.55222094", "0.5518842", "0.55170375", "0.5506502", "0.5506484", "0.5504516", "0.55010074", "0.54975927", "0.54940563", "0.54856175", "0.5485391", "0.5481675", "0.5472916", "0.5468736", "0.54589057", "0.5457866", "0.54361904", "0.5431624", "0.54276335", "0.5423116", "0.54140043", "0.5411937", "0.5402696", "0.53995216", "0.53957313", "0.5394326", "0.5391646", "0.53915423", "0.5389658", "0.5387684", "0.53771734", "0.5375789", "0.5363099", "0.53561884", "0.5349922", "0.5339006", "0.53388965", "0.53377", "0.53132373", "0.53111315", "0.53099144", "0.5288161", "0.527111", "0.5271027", "0.52693087", "0.52662176", "0.5258893", "0.5258231", "0.52494484", "0.5248356", "0.5240987", "0.52401775", "0.52401716", "0.5229101", "0.52181983", "0.5215285", "0.5213548", "0.52074105", "0.5200917", "0.51992893", "0.5186488", "0.5186328", "0.5180128", "0.51799643", "0.51757365", "0.51706344", "0.5169135", "0.5165748", "0.51626706", "0.516038", "0.51602876", "0.51563144", "0.51517415", "0.5148939", "0.51436275", "0.5142665", "0.5141684", "0.5140731" ]
0.0
-1
opens the chosen exceldocument and returns it as sheet
def get_excel(exceldocument):
    sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)
    return sheet
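A minimal usage sketch of the get_excel helper above, assuming xlrd is installed and that "report.xls" is a hypothetical local legacy .xls file (xlrd 2.x dropped .xlsx support, so an .xls input or an older xlrd is assumed):

import xlrd

def get_excel(exceldocument):
    # open the workbook and return its first worksheet
    sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)
    return sheet

sheet = get_excel("report.xls")      # hypothetical file name
print(sheet.nrows, sheet.ncols)      # row/column counts of the first sheet
print(sheet.cell_value(0, 0))        # value of the top-left cell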
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def openExcelSheet(outputFileName):\n workbook = Workbook()\n worksheet = workbook.add_sheet(\"Sheet 1\")\n return workbook, worksheet", "def _get_spreadsheet(i):\n path = io_mgr.get_parties_spreadsheet(i)\n if not os.path.exists(path):\n raise IOError()\n\n return openpyxl.load_workbook(path, read_only=True)", "def open_workbook(self, workbook):\n mylog.debug('Opening workbook %s' % workbook)\n workbook = openpyxl.load_workbook(filename = workbook)\n return workbook", "def open_xlsx_file(filepath, mode=\"rb\"):\n archive = XslxFile(filepath)\n archive.open()\n return archive", "def _read_xls(self, options, datas):\n book = xlrd.open_workbook(file_contents=datas)\n return self._read_xls_book(book)", "def open( self, filename ):\r\n #http://www.oooforum.org/forum/viewtopic.phtml?t=35344\r\n properties = []\r\n properties.append( OpenOfficeDocument._makeProperty( 'Hidden', True ) ) \r\n properties = tuple( properties )\r\n self.oodocument = self.openoffice.loadComponentFromURL( uno.systemPathToFileUrl( os.path.abspath( filename ) ), \"_blank\", 0, properties )", "def openExcelSheet(outputFileName):\n\tworkbook = Workbook(encoding='utf-8')\n\tworksheet = workbook.add_sheet(\"Sheet 1\")\n\tworksheet.col(0).width = 8000\n\tworksheet.col(1).width = 3000\n\tworksheet.col(2).width = 6000\n\tworksheet.col(3).width = 6000\n\tworksheet.col(4).width = 6000\n\tworksheet.col(5).width = 15000\n\tworksheet.col(6).width = 15000\n\tworksheet.col(7).width = 15000\n\tworksheet.col(8).width = 6000\n\tworksheet.col(9).width = 15000\n\tworksheet.col(10).width = 15000\n\tworksheet.col(11).width = 6000\n\tworksheet.col(12).width = 6000\n\tworksheet.col(13).width = 10000\n\tworksheet.col(14).width = 6000\n\treturn workbook, worksheet", "def handle(self):\n return pandas.ExcelFile(str(self.source))", "def lectxl(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".xlsx\")\n workbook = xlrd.open_workbook(NOM)\n SheetNameList = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(SheetNameList[0])\n num_rows = worksheet.nrows \n f=[NOM]\n for curr_row in range(0,num_rows):\n row = worksheet.row(curr_row)\n f.append(row)\n return f", "def load_data(fname='SeatTest_New.xlsx'):\n return pd.ExcelFile(fname)", "def get_sheet(excel_fname, sheet_name=None):\r\n book = xlrd.open_workbook(excel_fname)\r\n\r\n if sheet_name:\r\n\r\n if sheet_name in book.sheet_names():\r\n sheet = book.sheet_by_name(sheet_name)\r\n return sheet\r\n else:\r\n print(\"ERROR: Sheet '{0}' cannot be found in workbook '{1}'\".format(\r\n sheet_name, excel_fname))\r\n sys.exit(1)\r\n\r\n else:\r\n # Get the first worksheet.\r\n sheet = book.sheet_by_index(0)\r\n return sheet", "def open_file(path):\n book = xlrd.open_workbook(path)\n # print number of sheets\n #print book.nsheets\n # print sheet names\n #print book.sheet_names()\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n # read a row\n #print first_sheet.row_values(0)\n # read a cell\n cell = first_sheet.cell(1,0)\n #print cell\n #print cell.value\n # read a row slice\n #print first_sheet.row_slice(rowx=0,start_colx=0,end_colx=2)\n\n \"\"\"\n if Junipter.search_junipter_rule(first_sheet,1) == 0:\n print \"Juniper rule doesn't match\"\n else:\n print \"Juniper rule match\"\n \"\"\"\n\n \"\"\"\n if Mitac.search_mitac_rule(first_sheet,1) == 0:\n print \"Mitac rule doesn't match\"\n else:\n print \"Mitac rule match\"\n \"\"\"\n\n if Fabrinet.search_fabrinet_rule(first_sheet,3) == 0:\n print \"fabrinet rule doesn't match\"\n else:\n print \"fabrinet rule 
match\"", "def fromxlsx(filename, sheet=None, range=None, **kwargs):\n \n return XLSXView(filename, sheet=sheet, range=range, **kwargs)", "def get_workbook(path):\n wb = openpyxl.load_workbook(path, read_only=True)\n return wb", "def load_sheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n sheet_obj = wb[sheet_name]\n return sheet_obj, wb", "def test_open_order_sheet(self):\n order_processor = OrderProcessor()\n order_processor.open_order_sheet('COMP_3522_A4_orders.xlsx')\n self.assertTrue(self, isinstance(order_processor.orders_data_frame,\n DataFrame))", "def get_worksheet(self, workbook):\n for worksheet_name in workbook.sheet_names():\n return workbook.sheet_by_name(worksheet_name)", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def login_open_sheet(oauth_key_file, spreadsheet):\n try:\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\n gc = gspread.authorize(credentials)\n worksheet = gc.open(spreadsheet).sheet1 # pylint: disable=redefined-outer-name\n return worksheet\n except Exception as ex: # pylint: disable=bare-except, broad-except\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, \\\n and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\n print('Google sheet login failed with error:', ex)\n sys.exit(1)", "def OpenFileExcel(self, *args, **kwargs):\n directory = None\n if kwargs is not None:\n for key, value in kwargs.items():\n if key == 'directory':\n directory = value\n\n\n\n with wx.FileDialog(self, \"Open report file\", directory,\n wildcard=\"excel files (*.xlsx)|*.xlsx|(*.xls)|*.xlsx|(*.csv)|*.csv\",\n style=wx.FD_OPEN) as fileDialog:\n \n if fileDialog.ShowModal() == wx.ID_CANCEL:\n return \n\n\n else:\n\n pathname = fileDialog.GetPath()\n print('the file to be opened is :'+ pathname)\n\n def openWorkbook(xlapp, xlfile):\n try:\n xlwb = xlapp.Workbooks(xlfile)\n except Exception as e:\n try:\n xlwb = xlapp.Workbooks.Open(xlfile)\n except Exception as e:\n print(e)\n xlwb = None\n return (xlwb)\n\n pathname = os.path.normcase(pathname)\n\n\n try:\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = openWorkbook(excel, pathname)\n #ws = wb.Worksheets('Sheet1')\n excel.Visible = True\n except Exception as e:\n print(e)\n\n finally:\n # RELEASES RESOURCES\n ws = None\n wb = None\n excel = None", "def creaXl(nombre):\r\n return xlw.Workbook(nombre)", "def to_spreadsheet(self) -> sc.Spreadsheet:\n\n f, wb = self.to_workbook()\n wb.close() # Close the workbook to flush any xlsxwriter content\n spreadsheet = sc.Spreadsheet(f) # Wrap it in a spreadsheet instance\n return spreadsheet", "def _read_workbook_2007(maldoc):\n\n # Read in the 2007+ cells.\n color_print.output('g', \"Analyzing Excel 2007+ file ...\")\n workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc) \n color_print.output('g', \"Extracted XLM from ZIP archive.\")\n if (workbook_info is None):\n return (None, None, None)\n if (len(workbook_info) == 0):\n color_print.output('y', 
\"WARNING: No XLM macros found.\")\n return (None, None, None)\n\n if debug:\n print(\"=========== START 2007+ CONTENTS ==============\")\n for sheet in workbook_info.keys():\n print(\"\\n------\")\n print(sheet)\n print(\"\")\n for c in workbook_info[sheet].keys():\n print(str(c) + \" ---> \" + str(workbook_info[sheet][c]))\n print(\"=========== DONE 2007+ CONTENTS ==============\")\n \n # Figure out which sheet probably has the XLM macros.\n xlm_sheet_name = None\n max_formulas = -1\n for sheet in workbook_info.keys():\n if (len(workbook_info[sheet]) > max_formulas):\n max_formulas = len(workbook_info[sheet])\n xlm_sheet_name = sheet\n\n # Parse each formula and add it to a sheet object.\n xlm_cells = {}\n for cell_index in workbook_info[xlm_sheet_name].keys():\n\n # Value only cell?\n row = cell_index[0]\n col = cell_index[1]\n if (row not in xlm_cells):\n xlm_cells[row] = {}\n raw_formula = workbook_info[xlm_sheet_name][cell_index][0]\n if (raw_formula is None):\n\n # Do we have a value?\n formula_val = workbook_info[xlm_sheet_name][cell_index][1]\n if (formula_val is not None):\n\n # Just save the value in the cell.\n xlm_cells[row][col] = formula_val\n continue\n \n # Parse the formula into an XLM object.\n formula_str = b\"=\" + raw_formula\n formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)\n\n # Set the value of the formula if we know it.\n formula_val = workbook_info[xlm_sheet_name][cell_index][1]\n if (formula_val is not None):\n formula.value = formula_val\n\n # Save the XLM object.\n formula.update_cell_id(cell_index)\n xlm_cells[row][col] = formula\n color_print.output('g', \"Parsed MS XLM macros.\")\n \n # Merge the XLM cells with the value cells into a single unified spereadsheet\n # object.\n workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)\n if (workbook is None):\n color_print.output('r', \"ERROR: Merging XLM cells failed. 
Emulation aborted.\")\n return (None, None, None)\n \n # Done.\n return (workbook, xlm_cell_indices, xlm_sheet)", "def openDoc (self):\n fileName = QFileDialog.getOpenFileName(self,\n self.tr(\"Open File\"), \"\", \"All documents (*.%s;*.%s;*.%s;*.%s;*.%s;*.%s;*.%s);;Tests abstract (*.%s);;Tests unit (*.%s);;Tests suite (*.%s);;Tests plan (*.%s);;Tests global (*.%s);;Tests config (*.%s);;Tests data (*.%s)\" %\n ( TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE, \n TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE) )\n \n # new in v17.1\n if QtHelper.IS_QT5:\n _fileName, _type = fileName\n else:\n _fileName = fileName\n # end of new\n \n if not len(_fileName):\n return\n \n extension = str(_fileName).rsplit(\".\", 1)[1]\n if not ( extension.lower() in [ TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE,\n TestData.TYPE, TestUnit.TYPE, TestAbstract.TYPE ] ):\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"File not supported\") )\n return\n \n tmp = str(_fileName).rsplit(\"/\", 1)\n path = tmp[0]\n if len(tmp) > 1:\n _filename = tmp[1].rsplit(\".\", 1)[0]\n else:\n _filename = tmp[0].rsplit(\".\", 1)[0]\n self.newTab( path = path, filename = _filename, \n extension = extension, repoDest=UCI.REPO_UNDEFINED)", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n sys.exit(1)", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n print(datetime.datetime.now())\r\n sys.exit(1)", "def get_xlsx_report(url, sheet_name):\n r = requests.get(url, verify=False)\n data = pyexcel_xlsx.get_data(io.BytesIO(r.content))\n return data[sheet_name]", "def get_workbook(self):\n return self.workbook", "def login_open_sheet(oauth_key_file, spreadsheet):\n\ttry:\n\t\tjson_key = json.load(open(oauth_key_file))\n\t\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tjson_key['private_key'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t['https://spreadsheets.google.com/feeds'])\n\t\tgc = gspread.authorize(credentials)\n\t\tworksheet = gc.open(spreadsheet).sheet1\n\t\treturn worksheet\n\texcept Exception as ex:\n\t\tprint 'Unable to login and get spreadsheet. 
Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!'\n\t\tprint 'Google sheet login failed with error:', ex\n\t\tsys.exit(1)", "def __load( self, raw_content ):\n return( pd.read_excel( BytesIO( raw_content ) ) )", "def getSheet(self, sheet_name):\r\n return self.workbook.Sheets(sheet_name)", "def open_document(filepath, show=True):\n\t\n\tk = krita.Krita.instance()\n\tprint('Debug: opening %s' % filepath)\n\tdoc = k.openDocument(filepath)\n\tif show:\n\t\tApplication.activeWindow().addView(doc)\n\treturn doc", "def getExcelApp(self):\r\n return self.excelapp", "def excel(df_ccl, df_arg_stocks, df_bonds, df_arg_stocks_ccl):\n if os.path.exists('CCL.xlsx'):\n wb = xw.Book('CCL.xlsx')\n # SHEET CEDEARS\n ws = wb.sheets('CCL CEDEARs')\n ws.range('A1').expand().value = df_ccl\n # SHEET MERVAL\n ws_merval = wb.sheets('Merval')\n ws_merval.range('A1').expand().value = df_arg_stocks\n # SHEET BONOS\n ws_bonds = wb.sheets('Bonos')\n ws_bonds.range('A1').expand().value = df_bonds\n # SHEET CCL MERVAL\n ws_ccl = wb.sheets('CCL ADRs')\n ws_ccl.range('A1').expand().value = df_arg_stocks_ccl\n\n tiempo = time.asctime()\n print('Carga exitosa de datos. Ultima ejecución: ',tiempo)", "def get_worksheet(spreadsheet, name, create_if_non_existant=True, creation_func=None):\n for worksheet in spreadsheet.worksheets():\n if worksheet.title == name:\n return worksheet\n if create_if_non_existant:\n worksheet = spreadsheet.add_worksheet(title=name, rows=\"300\", cols=\"10\")\n if creation_func:\n creation_func(worksheet)\n return worksheet\n return None", "def read_file():\n # Read file\n wb = load_workbook(INPUT_FILE)\n ws = wb.worksheets[0]\n return ws", "def get_drive_worksheet(spreadsheet_key, worksheet_name):\n gspread = get_authenticated_gspread()\n spreadsheet = gspread.open_by_key(spreadsheet_key)\n return spreadsheet.worksheet(worksheet_name)", "def get_sheet():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n return sheet", "def get_worksheet(self):\n return self.worksheet", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = 
Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n\ttry:\r\n\t\tscope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive','https://www.googleapis.com/auth/spreadsheets','https://www.googleapis.com/auth/drive']\r\n\r\n\t\tjson_key = json.load(open(oauth_key_file))\r\n\t\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'],json_key['private_key'],scope)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t#below line doesn't work anymore so commented, and use the other way to authorize\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t#gc = gspread.authorize(credentials)\r\n\t\t#instead, use one of the 2 below lines\r\n\t\t#gc = gspread.service_account(filename='D:\\Online Classes\\edureka\\Edureka materials\\S3\\code\\iotsheets-276804-a20f837deb72.json')\r\n\t\tgc = gspread.service_account(GDOCS_OAUTH_JSON)\r\n\t\t\r\n\t\tworksheet = gc.open(spreadsheet).sheet1\r\n\t\treturn worksheet\r\n\texcept Exception as ex:\r\n\t\tprint('Unable to login and get spreadsheet. 
Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n\t\tprint('Google sheet login failed with error:', ex)\r\n\t\tsys.exit(1)", "def get_sheet(sheet, doc):\n scope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/drive\"]\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(SECRET_FILE, scope)\n\n gc = gspread.authorize(credentials)\n wks = gc.open(doc)\n sheet = wks.worksheet(sheet)\n data = sheet.get_all_values()\n h1 = ffill(data[0])\n\n # remove extra whitespace\n h1 = [k.strip() for k in h1]\n h2 = [k.strip() for k in data[1]]\n\n # create a multiindex\n columns = MultiIndex.from_tuples(zip(h1, h2))\n\n # populate the dataframe\n df = DataFrame(data[2:], columns=columns)\n return df", "def open_workbooks(self):\n try:\n self.wb_alm = load_workbook(self.fn_alm)\n self.wb_defect = load_workbook(self.fn_defect)\n self.wb_enhancement = load_workbook(self.fn_enhancement)\n self.wb_incident = load_workbook(self.fn_incident)\n self.wb_destination = load_workbook(self.fn_destination)\n\n self.wb_alm.iso_dates = True\n self.wb_defect.iso_dates = True\n self.wb_enhancement.iso_dates = True\n self.wb_incident.iso_dates = True\n self.wb_destination.iso_dates = True\n except Exception as e:\n self.error(str(e))\n return False\n\n return True", "def spreadsheet(self, key):\r\n return resource.Spreadsheet(self, key)", "def get_excel(self, file_name):\n global download_component\n\n download_soup = BeautifulSoup(self.res.text, 'lxml')\n download_component = get_download_component(download_soup)\n\n #Start excel session\n xsess = requests.Session()\n xsess.headers = EXCEL_HEADERS\n \n #prepare excel session\n self.data['SAPEVENTQUEUE'] = \"Button_Press~E002Id~E004\" + \\\n download_component + \"~E003~E002ResponseData~E004delta~E005ClientAction~E004submit~E003~E002~E003\"\n self.res = self.sess.post(self.url, data=self.data)\n\n #parse data from prepared excel session\n fileid, action = get_excel_url(BeautifulSoup(self.res.text,'lxml-xml')) \n \n #replace\n xurl = HOST_URL + action\n xurl = xurl.replace(\"\\\\x2f\",\"/\")\n xurl = xurl.replace(\"\\\\x7e\",\"~\")\n xurl = xurl.replace(\"\\\\x3f\", \"?\")\n xurl = xurl.replace(\"\\\\x2d\",\"-\")\n xurl = xurl.replace(\"\\\\x3d\",\"=\")\n xurl = xurl.replace(\"\\\\x253a\",\":\")\n xurl = xurl.replace(\"\\\\x26\",\"&\")\n xres = xsess.post(xurl)\n \n #write file\n with open(file_name,'wb') as f:\n f.write(xres.content)", "def from_excel(self, path, worksheet=0):\n reader, release_resources = _from_excel(path, worksheet=worksheet)\n return Reader(reader, closefunc=release_resources)", "def export_xlsx(self, key):\n spreadsheet_file = self.client.files().get(fileId=key).execute()\n links = spreadsheet_file.get('exportLinks')\n downloadurl = links.get('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n resp, content = self.client._http.request(downloadurl)\n return content", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def fileOpen(filePath,fileType):\n if os.path.exists(filePath) and os.path.getsize(filePath) > 0:\n print \"Retrieving file:%s\" %filePath\n if 
fileType.lower() == \"xl\":\n fileObject = xlrd.open_workbook(filePath)\n else:\n with open(filePath, 'r') as FH:\n if fileType.lower() == \"json\":\n fileObject = json.load(FH) \n elif fileType.lower() == \"txt\":\n fileObject = FH.readlines()\n elif fileType.lower() == \"csv\":\n file_data = csv.reader(FH)\n fileObject = output = list(file_data)\n elif fileType.lower() == \"j2\":\n fileObject = Template(FH.read())\n else:\n print \"Invalid fileType\"\n fileObject = False\n return fileObject\n else:\n print \"File does not exist or is empty: %s\" %filePath\n return False", "def load_inventory_sheet(workbook):\n sheets = workbook.get_sheet_names()\n return workbook[sheets[-1]]", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def sw_interactive_mode(xl_file):\n dfs = {sheet: xl_file.parse(sheet, index_col=0)\n for sheet in xl_file.sheet_names if (sheet[0] != '_')}\n print(\"Which one do you want to process: \")\n sheet_list = list(dfs.keys())\n for sheet in sheet_list:\n print(sheet, \"\\t\", \"[\" + str(sheet_list.index(sheet)) + \"]\")\n algo = input(\"Select the sheet: \")\n print(algo)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def create_spreadsheet(\n collection: db.Standard_collection,\n exported_documents: List[Any],\n title: str,\n share_with: List[str],\n) -> Any:\n flat_dicts = sheet_utils.prepare_spreadsheet(\n collection=collection, docs=exported_documents\n )\n return sheet_utils.write_spreadsheet(\n title=title, docs=flat_dicts, emails=share_with\n )", "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually invisible.\n root.destroy()\n \n return file_path", "def read_from_xlsx(filename='Infosys.xlsx', sheet='Profit & Loss', cell_range=OrderedDict([('A14','J14'), ('A15','J15')])):\n xl = win32com.client.Dispatch('Excel.Application')\n try:\n filename = env.DOWNLOAD_DIR + '\\\\' + filename\n wb = xl.Workbooks.Open(Filename=filename, ReadOnly=1, Editable=True)\n ws = wb.Worksheets(sheet)\n for k, val in cell_range.items():\n print (ws.Range(k + ':' + val).Value)\n\n except Exception as e:\n logger.exception(e)\n\n else:\n wb.Close(True)", "def _load_document(path, app):\n start_inventor()\n document_type_enum = {\n 12289: 'UnnownDocument',\n 12290: 
'PartDocument',\n 12291: 'AssemblyDocument',\n 12292: 'DrawingDocument',\n 12293: 'PresentationDocument',\n 12294: 'DesignElementDocument',\n 12295: 'ForeignModelDocument',\n 12296: 'SATFileDocument',\n 12297: 'NoDocument',\n }\n try:\n app.Documents.Open(str(path))\n document_type = document_type_enum[app.ActiveDocumentType]\n doc = win32com.client.CastTo(app.ActiveDocument, document_type)\n print(doc, document_type)\n return doc\n except:\n print('unable to load file')\n return None", "def create_workbook(self):\n try:\n if '.xlsm' in self.file_name or '.xltm' in self.file_name:\n self.wb = load_workbook(self.file_path, keep_vba=True)\n else:\n if '.xlsx' not in self.file_name:\n self.file_name = self.file_name + '.xlsx'\n self.wb = load_workbook(os.path.join(self.file_path, self.file_name))\n except Exception as e:\n self.wb = Workbook()\n\n sheet_names = self.wb.sheetnames\n if self.sheet_name in sheet_names:\n self.ws = self.wb[self.sheet_name]\n else:\n self.ws = self.wb.create_sheet(title=self.sheet_name)\n\n if self.data_len < self.ws.max_row:\n self.clear_sheet()\n self.wb.save(os.path.join(self.file_path, self.file_name))\n\n self.write_to_sheet()\n\n self.wb.save(os.path.join(self.file_path, self.file_name))", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)", "def load_excel_df(path_datafile):\n\treturn pd.read_excel(path_datafile, sheet_name=None)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def _open_data_source(self, *args):\n if len(args) != 0:\n # For first call to open (open())\n self.ds_filename = args[0]\n self.ds_tablename = args[1]\n self.ds_file = load_workbook(filename = args[0], use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = args[1])\n else:\n # For reopening the file (reset())\n self.ds_file = load_workbook(filename = self.ds_filename, use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = self.ds_tablename)\n # In any case we need a reader object to iterate over the table content \n self.ds_reader = self.ds_table.iter_rows()", "def write_to_xls_file(self,xls_filename,sheet_name):\r\n rb = xlrd.open_workbook(xls_filename,formatting_info=True)\r\n workbook = copy(rb) #a writable copy (I can't read values out of this, only write to it)\r\n\r\n ''' get all sheetnames '''\r\n list_of_sheetnames = []\r\n list_of_sheetnames = rb.sheet_names()\r\n ''' make a set of sheetnames without duplication '''\r\n sheet_names = 
set(list_of_sheetnames)\r\n ''' verify if a given ticker existed or not '''\r\n if (sheet_name in sheetnames) == True:\r\n flag = True\r\n else:\r\n flag = False\r\n\r\n if flag == True:\r\n print \"The data sheet named \" + ticker_name + \" existed.\"\r\n else:\r\n print \"No data sheet named \" + ticker_name + \", created new\"\r\n w_sheet = workbook.add_sheet(ticker_name)\r\n w_sheet.write(0,0,'Eod_C_Action')\r\n w_sheet.write(0,1,'Eod_I_Version')\r\n w_sheet.write(0,2,'UsrId')\r\n w_sheet.write(0,3,'Eod_D_Creation')\r\n w_sheet.write(0,4,'Eod_D_Quote')\r\n w_sheet.write(0,5,'InsId')\r\n w_sheet.write(0,6,'Eod_I_ProviderId')\r\n w_sheet.write(0,7,'Eod_N_Open')\r\n w_sheet.write(0,8,'Eod_N_High')\r\n w_sheet.write(0,9,'Eod_N_Low')\r\n w_sheet.write(0,10,'Eod_N_Close')\r\n w_sheet.write(0,11,'Eod_I_Volume')\r\n \r\n for row_index in range(1,len(self.close)+1):\r\n w_sheet.write(row_index,0,'A')\r\n w_sheet.write(row_index,1,0)\r\n w_sheet.write(row_index,2,8)\r\n w_sheet.write(row_index,3,datetime.datetime.now().strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,4,self.date[row_index-1].strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,5,1)\r\n w_sheet.write(row_index,6,1)\r\n w_sheet.write(row_index,7,self.open_[row_index-1])\r\n w_sheet.write(row_index,8,self.high[row_index-1])\r\n w_sheet.write(row_index,9,self.low[row_index-1])\r\n w_sheet.write(row_index,10,self.close[row_index-1])\r\n w_sheet.write(row_index,11,self.volume[row_index-1])\r\n\r\n workbook.save(xls_filename)", "def _read_workbook_97(maldoc):\n\n # Run olevba on the file and extract the XLM macro code lines.\n color_print.output('g', \"Analyzing Excel 97 file ...\")\n xlm_code = _extract_xlm(maldoc)\n color_print.output('g', \"Extracted XLM with olevba.\")\n if debug:\n print(\"=========== START RAW XLM ==============\")\n print(xlm_code)\n print(\"=========== DONE RAW XLM ==============\")\n if (xlm_code is None):\n color_print.output('r', \"ERROR: Unable to extract XLM. Emulation aborted.\")\n return (None, None, None)\n\n # Parse the XLM text and get XLM objects that can be emulated.\n xlm_cells = XLM.stack_transformer.parse_olevba_xlm(xlm_code)\n color_print.output('g', \"Parsed olevba XLM macros.\")\n if (xlm_cells is None):\n color_print.output('r', \"ERROR: Parsing of XLM failed. Emulation aborted.\")\n return (None, None, None)\n\n # Merge the XLM cells with the value cells into a single unified spereadsheet\n # object.\n workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)\n if (workbook is None):\n color_print.output('r', \"ERROR: Merging XLM cells failed. Emulation aborted.\")\n return (None, None, None)\n\n # Done. 
\n return (workbook, xlm_cell_indices, xlm_sheet)", "def get_xls(xls_name, sheet_name):\n cls = []\n # get xls file's path\n xlsPath = os.path.join(proDir, \"testFile\", 'case', xls_name)\n # open xls file\n file = open_workbook(xlsPath)\n # get sheet by name\n sheet = file.sheet_by_name(sheet_name)\n # get one sheet's rows\n nrows = sheet.nrows\n for i in range(nrows):\n if sheet.row_values(i)[0] != u'case_name':\n cls.append(sheet.row_values(i))\n return cls", "def sheet(self, name, encoding=None, order_by=None):\n return _ExcelSheet(self, name, encoding, order_by)", "def import_excel(self):\n self.ensure_one()\n if self.file_import:\n filecontent = base64.b64decode(self.file_import)\n try:\n # Todo: import excel\n input = cStringIO.StringIO()\n input.write(filecontent)\n wb = open_workbook(file_contents=input.getvalue())\n problem_emails = {\"inserted_names\": [],\n \"inserted_emails\": [],\n \"invalid_emails\": [],\n \"duplicate_names\": [],\n \"duplicate_emails\": []}\n for sheet in wb.sheets():\n try:\n self.insert_db(sheet, wb, problem_emails)\n except Exception as e:\n raise (str(e))\n\n except:\n # todo: import csv\n wb = filecontent.split('\\r\\n')\n for line in range(1, len(wb) - 1):\n line_data = wb[line].split(',')\n self.crete_line(line_data[0], line_data[1])\n\n if problem_emails['invalid_emails']:\n raise except_orm(_('Invalid Email Format Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['invalid_emails']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_names']:\n raise except_orm(_('Duplicate Name Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_names']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_emails']:\n raise except_orm(_('Duplicate Email Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_emails']))) + '\\n\\n Please check and try again.'))\n\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'shipmaster.invitation',\n 'res_id': self.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_indicator_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def GetOpenedFile(self,file):\n\t\tif isinstance(file,str):\n\t\t\tindex = self.OpenedFilenames.index(file)\n\t\telif isinstance(file,int):\n\t\t\tindex=file\n\t\telse:\n\t\t\traise PycomError('Type of file in GetOpenedFile is wrong ')\n\t\treturn self.acad.Documents.Item(index)", "def show_book_content(workbook = None):\n if not workbook:\n return\n \n print \"work sheet info:\"\n print \"#, name, #rows, #cols\"\n for casename in workbook.sheet_names():\n sheet = workbook.sheet_by_name(casename)\n print sheet.number, sheet.name, sheet.nrows, sheet.ncols", "def obtenerHoja(libro, nombre):\r\n return libro.sheet_by_name(nombre)", "def get_worksheet(sheet_id, sheet_name):\n if (sheet_id, sheet_name) in WORKSHEET_CACHE:\n return WORKSHEET_CACHE[(sheet_id, sheet_name)]\n\n sheet = get_spreadsheet(sheet_id)\n worksheet = sheet.worksheet(sheet_name)\n\n WORKSHEET_CACHE[(sheet_id, sheet_name)] = worksheet\n return worksheet", "def get_or_create_spreadsheet(gc, name, share_with):\n try:\n sh = 
gc.open(name)\n except SpreadsheetNotFound:\n sh = gc.create(name)\n sh.share(share_with, perm_type='user', role='writer')\n return sh", "def _get_sheet(self, ws_name):\n return self._spreadsheet.sheet_by_name(ws_name)", "def get_sheet(self, title=''):\n\n if not title and not self._actual_sheet:\n if not len(self._pages):\n self._actual_sheet = self._add_sheet('Page 1')\n else:\n for page in self._pages:\n self._actual_sheet = self._pages[page]\n break\n elif not title and self._actual_sheet:\n self._actual_sheet = self._actual_sheet\n else:\n if title not in self._pages:\n raise IndexError(\n 'Pagina \"%s\"\" no está entre las paginas \"%s\"' % (title, ', '.join([x for x in self._pages]))\n )\n self._actual_sheet = self._pages[title]\n return self._actual_sheet", "def google_sheets_connector():\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def test_from_file_xls(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xls')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def read_sheet(self, native_sheet):\n sheet = XLSXSheet(native_sheet, **self._keywords)\n return {sheet.name: sheet.to_array()}", "def select_excel(self, recursive=True): # pragma: no cover\n return self.select_by_ext(self._ms_excel_ext, recursive)", "def convert_xlsx_to_xls(inp_dict):\n if inp_dict[\".xlsx\"]:\n for fname in inp_dict[\".xlsx\"]:\n fname = os.path.abspath(fname.encode(\"utf-8\"))\n fname = os.path.abspath(fname.decode(\"utf-8\"))\n excel = EnsureDispatch('Excel.Application')\n wb = excel.Workbooks.Open(fname)\n excel.DisplayAlerts = False\n wb.SaveAs(fname[:-1], FileFormat=56) #FileFormat = 51 is for .xlsx extension\n wb.Close() #FileFormat = 56 is for .xls extension\n excel.Application.Quit()\n excel.DisplayAlerts = True\n inp_dict[\".xls\"].append(fname[:-1])\n inp_dict[\"del\"].append(fname[:-1])\n return inp_dict", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)", "def import_worksheet(filename, sheetname, range_start='A1'):\n app = xw.App(visible=False)\n book = xw.Book(filename)\n sheet = book.sheets(sheetname)\n excel_data = sheet.range(range_start).expand('table').value\n keys = [snake_case(key) for key in excel_data[0]]\n data = [dict(zip(keys, values)) for values in excel_data[1:]]\n book.close()\n app.quit()\n return data", "def export(self):\n rpt_date = datetime.now()\n filename = 'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n 
self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def test_excel_simple_input(self, extension='xls'):\n excel_support = getattr(settings, 'EXCEL_SUPPORT', django_tables2_reports.utils.get_excel_support())\n response = self.table.treatement_to_response(\n self.table.as_csv(HttpRequest()),\n report_format='xls')\n self.assertEqual(response.status_code, 200)\n open('test-file-%s.%s' % (excel_support, extension),\n 'wb').write(response.content)", "def create_new_workbook():\r\n merged_wb = openpyxl.Workbook() # create new Workbook\r\n merged_wb[\"Sheet\"].title = \"Merged Data\" # change the title of the new sheet to \"Merged Data\"\r\n set_headers(merged_wb[\"Merged Data\"]) # set the headers\r\n merged_wb.save('Merged_Data.xlsx') # save the Workbook with the filename \"Merged_Data.xlsx\"\r\n return merged_wb", "def copy_excel_from_sharepoint():\n\tconf = get_conf_from_json()\n\ttoken = acquire_token_func\n\tclient = GraphClient(token)\n\ttenant_prefix = 'eribel354'\n\tfile_abs_url = conf['file_abs_url']\n\tfile_item = client.shares.by_url(file_abs_url).drive_item.get().execute_query()\n\twith open('./temp_excel.xlsm', 'wb') as tempxl:\n\t\tfile_item.download(tempxl).execute_query()", "def readSpreadsheet(url: str, cres_loc: str, alias:str):\n # gc = gspread.oauth() # oauth config, TODO (northdpole): make this configurable\n changes_present = False\n try:\n gc = gspread.service_account()\n sh = gc.open_by_url(url)\n logger.debug(\"accessing spreadsheet \\\"%s\\\" : \\\"%s\\\"\"%(alias,url))\n for wsh in sh.worksheets():\n if wsh.title[0].isdigit():\n logger.debug(\n \"handling worksheet %s (remember, only numbered worksheets will be processed by convention)\" % wsh.title)\n records = wsh.get_all_records()\n toyaml = yaml.safe_load(yaml.dump(records))\n try:\n validateYaml(yamldoc=toyaml, schema=CRE_LINK_schema)\n logger.debug(\"Worksheet is valid, saving to disk\")\n with open(os.path.join(cres_loc, wsh.title + \".yaml\"), \"wb\") as fp:\n fp.write(yaml.dump(toyaml, encoding='utf-8'))\n changes_present = True\n except jsonschema.exceptions.ValidationError as ex:\n logger.error(wsh.title + \" failed validation\")\n logger.error(ex)\n except gspread.exceptions.APIError as ae:\n logger.error(\"Error opening spreadsheet \\\"%s\\\" : \\\"%s\\\"\"%(alias,url))\n logger.error(ae)\n return changes_present", "def load_from_excel(self, excel_fp: str):\n # TODO:\n pass", "def initWorkbook(outfile):\n workbook = xlsxwriter.Workbook(outfile)\n return workbook", "def main():\n with excel_app() as app:\n print(app)\n input(\"press enter to continue ...\")\n with excel_book(app, r'E:\\scratch\\Hello1.xls') as book:\n print(book.Worksheets[0].Range(\"A1\").Value)\n book.Worksheets[0].Range(\"A2\").Value = str(datetime.date.today())\n input(\"press enter to continue ...\")\n book.Save()", "def get_or_create_worksheet(sh, name):\n try:\n return sh.worksheet(name)\n except WorksheetNotFound:\n return sh.add_worksheet(title=name, rows=1, cols=1)", "def build(*args, **kwargs):\n\n\treturn Excel(*args, **kwargs)", "def __init__(self, filename=None):\n self.name = filename\n self.wb = None\n if os.path.exists(filename):\n try:\n self.wb = xlrd.open_workbook(filename)\n except:\n print(\"not an excel file\")\n else:\n self.set_amiSheetNames()\n self.filename = os.path.splitext(os.path.abspath(filename))[0]\n else:\n print(\"not a file\")", "def set_user_defined_sheet_name():\n global sheet\n probable_sheets = []\n\n machine_nr = input('Veuillez entrer un 
numéro de machine (Vide si recherche par C.) : ')\n invoice_nr = input('Veuillez entrer un numéro de C. : ')\n workbook = load_workbook(filename='./temp_excel.xlsm')\n sheets = workbook.sheetnames\n\n # Research amongs all sheets (There can be a lot)\n for ii in sheets:\n if machine_nr in ii and invoice_nr in ii:\n sheet = workbook[ii]\n break\n elif machine_nr in ii:\n probable_sheets.append(ii)\n elif invoice_nr in ii:\n probable_sheets.append(ii)\n\t \n\n # If no exact corresponding sheet is found\n if not sheet and probable_sheets != []:\n print('Aucune feuille ne correspond totalement à votre recherche, mais certaines s\\'en rapprochent :')\n i = 0\n for ii in probable_sheets:\n print(f'{i} : {ii}')\n i+=1\n print('99 pour quitter')\n choice = input('Faites un choix :')\n\n # Let the user exit the script\n if choice == 'q':\n sys.exit()\n else:\n sheet = workbook[probable_sheets[int(choice)]]", "def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')", "def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()" ]
[ "0.69280106", "0.6738904", "0.6419168", "0.6310797", "0.605773", "0.6051629", "0.6044591", "0.6008777", "0.5982114", "0.5974647", "0.59715974", "0.5966216", "0.59524924", "0.59430736", "0.5873261", "0.5856575", "0.57233405", "0.5704794", "0.569487", "0.5689535", "0.5683357", "0.56626683", "0.5650288", "0.5643976", "0.56220996", "0.5621131", "0.5582671", "0.557879", "0.5575977", "0.5569652", "0.5567806", "0.55528384", "0.5536501", "0.5514255", "0.551264", "0.5502293", "0.55009633", "0.54940444", "0.5486658", "0.5484722", "0.54712766", "0.54655486", "0.54533255", "0.54446626", "0.54190254", "0.54154164", "0.5411645", "0.5407858", "0.54061085", "0.5384565", "0.53834254", "0.5380825", "0.53719556", "0.5364869", "0.5353064", "0.5349423", "0.5344671", "0.5335036", "0.5334431", "0.53295416", "0.53288215", "0.5322968", "0.53129596", "0.5312474", "0.5309922", "0.5289707", "0.5265525", "0.5264809", "0.5264325", "0.526006", "0.52571434", "0.5256673", "0.525286", "0.52450466", "0.5238357", "0.5235213", "0.5232096", "0.52084166", "0.52024746", "0.52019185", "0.5196821", "0.51917136", "0.51891536", "0.5186534", "0.5168698", "0.516854", "0.5162107", "0.51611435", "0.5157047", "0.5140927", "0.51372", "0.5126876", "0.5125427", "0.51189417", "0.51047176", "0.5095549", "0.50942296", "0.50920385", "0.50891507", "0.5088053" ]
0.77836186
0
creates an xml structure with root and mother elements
def createxmlmall():

    root = ET.Element("state")
    model = ET.SubElement(root, "model")
    model.text = r""

    dataid = ET.SubElement(root, "dataids")
    application = ET.SubElement(root, "application")
    application.text = "SIBS Configurator"
    safecookie = ET.SubElement(root, "safecookie")
    steps = ET.SubElement(root, "steps")
    prev = ET.SubElement(steps, "prev")
    lastproxy = ET.SubElement(root, "last-proxy").text = "tcserver0"

    tree = ET.ElementTree(root)  # saves tree in variable "tree"

    return tree, safecookie, steps, prev
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()", "def create_roots(self):\n self.root = SchemaNode.element(\"nmt:netmod-tree\",\n interleave=False, occur=2)\n self.confdata = SchemaNode.element(\"nmt:top\", self.root,\n interleave=True, occur=2)\n self.rpcs = SchemaNode.element(\"nmt:rpc-methods\", self.root,\n interleave=False, occur=2)\n self.notifications = SchemaNode.element(\"nmt:notifications\", self.root,\n interleave=True, occur=2)", "def build(self):\n root = ET.Element(\"package\", **self.attr)\n self.build_meta(root)\n self.build_manifest(root)\n self.build_spine(root)\n return root", "def to_xml(self):\r\n element = ET.Element(\"node\")\r\n\r\n element.attrib['name'] = self.name\r\n element.attrib['description'] = self.description\r\n\r\n return element", "def CreateKmlDoc():\n\n kml_doc = xml.dom.minidom.Document()\n kml_element = kml_doc.createElementNS('http://www.opengis.net/kml/2.2', 'kml')\n kml_element.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\n kml_element = kml_doc.appendChild(kml_element)\n document = kml_doc.createElement('Document')\n kml_element.appendChild(document)\n return kml_doc", "def _create_nrml():\n return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)", "def createElements(self):\n if self.__builder.checkRootTag(self.__content):\n elements = self.__content.findall(\"*\")\n\n for el in elements:\n self.parseXml(el, {})\n\n return self.__builder.getRoot()\n else:\n print(\"The Element \", self.__content.tag, \" is unkown.\")\n return None", "def build(self):\n root = ET.Element(\"container\", xmlns=self.namespace,\n version=self.version)\n rfs = ET.SubElement(root, \"rootfiles\")\n attrs = {\"full-path\": self.full_path, \"media-type\": self.media_type, }\n dummy = ET.SubElement(rfs, # pragma pylint: disable=W0612\n \"rootfile\", **attrs)\n # pragma pylint: enable=W0612\n return root", "def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()", "def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it", "def build(self):\n root = ET.Element(\"html\", xmlns=self.xmlns)\n self.build_head(root)\n 
self.build_body(root)\n return root", "def gen_tree(path):\n # print(\"CALLING.. Tree\")\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.parse(path, parser)\n root = tree.getroot() \n return root, tree", "def build(self):\n root = ET.Element(\"ncx\", xmlns=self.namespace, version=self.version)\n head = ET.SubElement(root, \"head\")\n ET.SubElement(head, \"meta\",\n content=\"urn:uuid:%s\" % self.bookid,\n name=\"dtb:uid\",\n )\n ET.SubElement(head, \"meta\",\n content=\"1\",\n name=\"dtb:depth\",\n )\n ET.SubElement(head, \"meta\",\n content=\"0\",\n name=\"dtb:totalPageCount\",\n )\n ET.SubElement(head, \"meta\",\n content=\"0\",\n name=\"dtb:maxPageNumber\",\n )\n doctitle = ET.SubElement(root, \"docTitle\")\n ET.SubElement(doctitle, \"text\").text = self.title\n navmap = ET.SubElement(root, \"navMap\")\n seq = 1\n for sid, label, src in self.items:\n navpt = ET.SubElement(navmap, \"navPoint\", id=sid,\n playOrder=str(seq))\n navlabel = ET.SubElement(navpt, \"navLabel\")\n ET.SubElement(navlabel, \"text\").text = label\n ET.SubElement(navpt, \"content\", src=src)\n seq += 1\n return root", "def generate_xml_tree(self):\n try:\n tree = et.parse(self.file)\n self.root = tree.getroot()\n self.blast_output = self.root[8]\n self.iteration = self.blast_output[0]\n self.iteration_hit = self.iteration[4]\n\n for i in self.iteration_hit:\n self.hits.append(i)\n\n for i in self.hits:\n h = []\n for j in i:\n h.append(j)\n\n for hsp in h[5]:\n procent = \"{0:.2f}\".format(int(hsp[10].text) / int(hsp[13].text) * 100)\n procent = float(procent)\n self.aligns.append(Alignment(h[2].text,\n hsp[1].text,\n procent,\n hsp[12].text,\n hsp[10].text,\n hsp[13].text,\n hsp[14].text,\n hsp[15].text,\n hsp[16].text))\n self.main_alignments.append(MainAlignment(i[2].text,\n self.aligns))\n self.aligns = []\n except IndexError:\n \"Bad file.\"", "def __create_document(self):\n doc = xml.dom.minidom.Document()\n kml = doc.createElement('kml')\n kml.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\n doc.appendChild(kml)\n document = doc.createElement('Document')\n kml.appendChild(document)\n docName = doc.createElement('name')\n document.appendChild(docName)\n docName_text = doc.createTextNode(self['name'])\n docName.appendChild(docName_text)\n docDesc = doc.createElement('description')\n document.appendChild(docDesc)\n docDesc_text = doc.createTextNode(self['description'])\n docDesc.appendChild(docDesc_text)\n return doc", "def new_xmldoc_opml():\n xmldoc = XMLDoc()\n opml = OPML()\n xmldoc.root_element = opml\n\n return (xmldoc, opml)", "def getXML(self):\n\n def _getElementForMappingEntry(entry, mappingStyle):\n xmlDocTmp = Document()\n element = xmlDocTmp.createElement(mappingStyle)\n for k, v in viewitems(entry):\n # ignore empty, None or compiled regexp items into output\n if not v or (k == \"path-match-expr\"):\n continue\n element.setAttribute(k, str(v))\n return element\n\n xmlDoc = Document()\n root = xmlDoc.createElement(\"storage-mapping\") # root element name\n for mappingStyle, mappings in viewitems(self):\n for mapping in mappings:\n mapElem = _getElementForMappingEntry(mapping, mappingStyle)\n root.appendChild(mapElem)\n return root.toprettyxml()", "def generate_xml(self, movement):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n xml_movement = ET.SubElement(xmlroot, 'movement')\n\n ET.SubElement(xml_movement, 
\"location_from\").text = format_locname(movement.location_from)\n ET.SubElement(xml_movement, \"location_to\").text = format_locname(movement.location_to)\n ET.SubElement(xml_movement, \"movement_id\").text = format_locname(movement.id)\n ET.SubElement(xml_movement, \"created_at\").text \\\n = unicode(movement.created_at.strftime('%Y-%m-%d %H:%M:%S'))\n\n xml_product = ET.SubElement(xml_movement, 'product')\n for fieldname in ['artnr', 'name', 'einheit', 'ean', 'products_per_export_package',\n 'pallet_height']:\n ET.SubElement(xml_product, fieldname).text = u''\n\n xml_unit = ET.SubElement(xml_movement, 'unit')\n ET.SubElement(xml_unit, \"mui\").text = unicode(movement.mui)\n ET.SubElement(xml_unit, 'height').text = unicode(movement.unit_height)\n ET.SubElement(xml_unit, 'quantity').text = unicode(movement.quantity)\n ET.SubElement(xml_unit, 'created_at').text = movement.unit_created_at.strftime('%Y-%m-%d %H:%M:%S')\n return xmlroot", "def __init__(self, xml_file, root_name, tags=[]):\n self.xml_file = xml_file\n self.tree = ET.ElementTree(ET.Element(root_name))\n self.root = self.tree.getroot()\n\n for tag in tags:\n self.root.set(tag[0], tag[1])", "def _preprocess(self, shapes):\n # Add root element\n root = Element('root')\n root.append(\n Comment('Generated xml shapes')\n )\n\n # Add elements for each shape\n for shape in shapes:\n root.append(\n Comment('Generated shape: {name}'.format(name=shape.shape_name()))\n )\n\n child = SubElement(root, 'shape', { 'attr-1': 'attribute content' })\n child.set('attr-b', 'other attribute content')\n child.text = 'tag content'\n\n return root", "def generate_xml(self, locations):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n kernel = Kerneladapter()\n\n for locname in locations:\n xml_location = ET.SubElement(xmlroot, 'location')\n location = kernel.location_info(locname)\n ET.SubElement(xml_location, \"location\").text = unicode(locname)\n ET.SubElement(xml_location, \"height\").text = unicode(location['height'])\n ET.SubElement(xml_location, \"attributes\").text = unicode(location['attributes'])\n ET.SubElement(xml_location, \"floorlevel\").text = unicode(location['floorlevel'])\n ET.SubElement(xml_location, \"preference\").text = unicode(location['preference'])\n ET.SubElement(xml_location, \"info\").text = unicode(location['info'])\n ET.SubElement(xml_location, \"reserved_for\").text = unicode(location['reserved_for'])\n\n for mui in location['allocated_by']:\n unit = kernel.unit_info(mui)\n xml_unit = ET.SubElement(xml_location, \"unit\")\n ET.SubElement(xml_unit, \"mui\").text = unicode(unit['mui'])\n ET.SubElement(xml_unit, \"quantity\").text = unicode(unit['quantity'])\n ET.SubElement(xml_unit, \"artnr\").text = unicode(unit['product'])\n ET.SubElement(xml_unit, \"height\").text = unicode(unit['height'])\n ET.SubElement(xml_unit, \"pick_quantity\").text = unicode(unit['pick_quantity'])\n ET.SubElement(xml_unit, 'created_at').text = unit['created_at'].strftime('%Y-%m-%d %H:%M:%S')\n ET.SubElement(xml_unit, \"movements\").text = unicode(unit['movements'])\n ET.SubElement(xml_unit, \"picks\").text = unicode(unit['picks'])\n ET.SubElement(xml_unit, \"attributes\").text = unicode(unit['attributes'])\n try:\n product = produktpass.models.Product.objects.get(artnr=unit['product'])\n ET.SubElement(xml_unit, \"product_name\").text = unicode(product.name)\n except 
produktpass.models.Product.DoesNotExist:\n ET.SubElement(xml_unit, \"product_name\").text = '???'\n\n return xmlroot", "def create_osm_tree():\n osm = etree.Element(\"osm\", {'version': '0.6', 'generator': 'create-legend'})\n osm.append(etree.Element(\"bounds\", {'minlat': '-85', 'maxlat': '85', 'minlon': '-180', 'maxlon': '180'}))\n return etree.ElementTree(osm)", "def start_serialization(self):\n self.xml = SimplerXMLGenerator(self.stream, self.options.get(\"encoding\", settings.DEFAULT_CHARSET))\n self.xml.startDocument()\n self.xml.startElement(\"xliff\", {\n \"version\": \"1.2\",\n \"xmlns\": \"urn:oasis:names:tc:xliff:document:1.2\",\n \"xmlns:d\": \"https://docs.djangoproject.com/\"\n })", "def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)", "def create_xml_patient(self, data=None):\n data = data or {}\n now = datetime.datetime.now()\n delta = datetime.timedelta(days=random.randint(1, 10) * -1)\n enrolled = now - delta\n delta = datetime.timedelta(days=random.randint(1, 10))\n next_visit = now + delta\n defaults = {\n 'Subject_Number': self.random_string(10),\n 'Pin_Code': self.random_number_string(4),\n 'Date_Enrolled': enrolled.strftime('%b %d %Y '),\n 'Next_Visit': next_visit.strftime('%b %d %Y '),\n 'Mobile_Number': '12223334444',\n }\n defaults.update(data)\n empty_items = [k for k, v in defaults.iteritems() if not v]\n for item in empty_items:\n del defaults[item]\n root = self._node('Table')\n for key, value in defaults.iteritems():\n root.append(self._node(key, value))\n return root", "def saveToXml(self) -> org.jdom.Element:\n ...", "def xml_item(cls, item):\n xml = cls.xml_root_open(item)\n xml += cls.xml_add_links(item)\n xml += cls.xml_dict(item)\n xml += cls.xml_root_close()\n return xml", "def build_xml(self, **kwargs):\r\n\r\n # Retrieve keyward arguments\r\n question_text = kwargs.get('question_text', '')\r\n explanation_text = kwargs.get('explanation_text', '')\r\n script = kwargs.get('script', None)\r\n num_responses = 
kwargs.get('num_responses', 1)\r\n num_inputs = kwargs.get('num_inputs', 1)\r\n\r\n # The root is <problem>\r\n root = etree.Element(\"problem\")\r\n\r\n # Add a script if there is one\r\n if script:\r\n script_element = etree.SubElement(root, \"script\")\r\n script_element.set(\"type\", \"loncapa/python\")\r\n script_element.text = str(script)\r\n\r\n # The problem has a child <p> with question text\r\n question = etree.SubElement(root, \"p\")\r\n question.text = question_text\r\n\r\n # Add the response(s)\r\n for i in range(0, int(num_responses)):\r\n response_element = self.create_response_element(**kwargs)\r\n root.append(response_element)\r\n\r\n # Add input elements\r\n for j in range(0, int(num_inputs)):\r\n input_element = self.create_input_element(**kwargs)\r\n if not (None == input_element):\r\n response_element.append(input_element)\r\n\r\n # The problem has an explanation of the solution\r\n if explanation_text:\r\n explanation = etree.SubElement(root, \"solution\")\r\n explanation_div = etree.SubElement(explanation, \"div\")\r\n explanation_div.set(\"class\", \"detailed-solution\")\r\n explanation_div.text = explanation_text\r\n\r\n return etree.tostring(root)", "def __init__(self, output, encoding='utf-8', short_empty_elements=True):\n document = XMLGenerator(output, encoding) # Python 3.2 : short_empty_elements\n document.startDocument()\n self._document = document\n self._output = output\n self._encoding = encoding\n self._short_empty_elements = short_empty_elements\n self._open_elements = []\n return", "def get_xml(self):\n\t\t# get the XML description of the VM\n\t\tvm_xml = self.clonezilla_vm_obj.XMLDesc(0)\n\t\troot = ET.fromstring(vm_xml)\n\t\treturn root", "def wrez2xml(self,newdoc,newroot):\n\t\twrez = newdoc.createElement('wrez')\n\t\twrez.setAttribute('hasChanged', str(self.hasChanged))\n\t\tnewroot.appendChild(wrez)\n\n\t\tpath = newdoc.createElement('path')\n\t\tpath.setAttribute('value', self.path)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('init_str')\n\t\tpath.setAttribute('value', self.init_str)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('hash_sha512')\n\t\tpath.setAttribute('value', self.hash_sha512)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('src_rip')\n\t\tpath.setAttribute('value', self.src_rip)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('quality')\n\t\tpath.setAttribute('value', self.quality)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('codec')\n\t\tpath.setAttribute('value', self.codec)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('language')\n\t\tpath.setAttribute('value', self.language)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('audio')\n\t\tpath.setAttribute('value', self.audio)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('encoder')\n\t\tpath.setAttribute('value', self.encoder)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('version')\n\t\tpath.setAttribute('value', self.version)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('extension')\n\t\tpath.setAttribute('value', self.extension)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('release_year')\n\t\tpath.setAttribute('value', self.release_year)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('title')\n\t\tpath.setAttribute('value', self.title)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('size')\n\t\tpath.setAttribute('value', 
str(self.size))\n\t\twrez.appendChild(path)\n\t\treturn wrez", "def nxroot():\n with h5py.File('dummy.nxs', mode='w', driver=\"core\", backing_store=False) as f:\n root = make_group(f)\n root.create_class('entry', NXentry)\n yield root", "def generate(self, info):\n root = ET.Element(\"libs\")\n\n # Set target\n target = ET.SubElement(root, \"target\")\n target.text = info.target\n\n # Set time info\n time_start = ET.SubElement(root, \"start_time\")\n time_start.text = info.start_time.strftime(\"%H-%m-%Y %H:%M:%S\")\n\n time_end = ET.SubElement(root, \"end_time\")\n time_end.text = info.end_time.strftime(\"%H-%m-%Y %H:%M:%S\")\n\n # WordPress info\n wordpress = ET.SubElement(root, \"wordpress\")\n wordpress.set(\"current_version\", info.wordpress_info.current_version)\n wordpress.set(\"last_version\", info.wordpress_info.latest_version)\n\n # Set CVE\n if info.wordpress_info.vulnerabilities:\n cves = ET.SubElement(wordpress, \"cves\")\n for cve in info.wordpress_info.vulnerabilities:\n xml_cve = ET.SubElement(cves, \"cve\")\n xml_cve.text = cve\n\n # Plugins info\n plugins = ET.SubElement(root, \"plugins\")\n for plugin in info.plugins:\n xml_plugin = ET.SubElement(plugins, \"plugin\")\n xml_plugin.text = plugin.plugin_name\n\n xml_plugin.set(\"current_version\", plugin.current_version)\n xml_plugin.set(\"last_version\", plugin.latest_version)\n xml_plugin.set(\"url\", plugin.plugin_uri)\n xml_plugin.set(\"outdated\", \"Yes\" if plugin.is_outdated else \"No\")\n\n # Set CVE\n if plugin.cves:\n cves = ET.SubElement(xml_plugin, \"cves\")\n for cve in plugin.cves:\n xml_cve = ET.SubElement(cves, \"cve\")\n xml_cve.text = cve\n\n # Set exploits\n if plugin.cves:\n exploits = ET.SubElement(xml_plugin, \"exploits\")\n for exploit in plugin.exploits:\n xml_exploit = ET.SubElement(exploits, \"exploits\")\n xml_exploit.text = exploit\n\n return root", "def build_serializer(self):\n self._add_child_elements_recursive(self.get_root_element())", "def get_xml(self):\n xml = svgwrite.etree.etree.Element(self.elementname)\n if self.debug:\n self.validator.check_all_svg_attribute_values(self.elementname, self.attribs)\n for attribute, value in self.attribs.items():\n # filter 'None' values\n if value is not None:\n value = self.value_to_string(value)\n if value: # just add not empty attributes\n xml.set(attribute, value)\n \n for element in self.elements:\n xml.append(element)\n return xml", "def serialize(self, root):", "def __set_xml():\n if len(activity) == 0:\n\n OS = DRIVER.OS\n xml_path = os.path.join(prjDir, \"config\", \"element_android.xml\")\n if OS == \"iOS\":\n xml_path = os.path.join(prjDir, \"config\", \"element_iOS.xml\")\n\n # open the xml file\n per = elementTree.parse(xml_path)\n all_element = per.findall('activity')\n\n for firstElement in all_element:\n activity_name = firstElement.get(\"name\")\n\n element = {}\n\n for secondElement in firstElement.getchildren():\n element_name = secondElement.get(\"name\")\n\n element_child = {}\n for thirdElement in secondElement.getchildren():\n\n element_child[thirdElement.tag] = thirdElement.text\n\n element[element_name] = element_child\n activity[activity_name] = element", "def xml(self):\n rough_string = ElementTree.tostring(self.dom, \"utf-8\")\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "def __init__(self):\n self.elementName=\"\"\n self.elementText=\"\"\n self.attrib={}\n self.xml=\"\"", "def create_xml_regression(lfiles, lsbj, foxml):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = 
impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n e = doc.createElement('subject')\n e.setAttribute('id', 'case')\n\n for i, fn in enumerate(lfiles):\n v = doc.createElement('visit')\n v.setAttribute('id', \"subj{}\".format(i))\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', \"face\")\n t = doc.createTextNode(fn)\n f.appendChild(t)\n\n a = doc.createElement('age')\n x = doc.createTextNode(str(lsbj[i][\"age\"]))\n a.appendChild(x)\n\n\n v.appendChild(f)\n v.appendChild(a)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())", "def build(root):", "def createXML(config, ccdpars, userpars):\n\n # identify the template\n appLab = ccdpars.appLab.value()\n if config.debug:\n print('DEBUG: createXML: application = ' + appLab)\n print('DEBUG: createXML: application vals = ' + str(config.templates[appLab]))\n\n if config.template_from_server:\n # get template from server\n url = config.http_camera_server + config.http_path_get + '?' + \\\n config.http_search_attr_name + '=' + config.templates[appLab]\n if config.debug:\n print ('DEBUG: url = ' + url)\n sxml = urllib2.urlopen(url).read()\n txml = ET.fromstring(sxml)\n else:\n # get template from local file\n if config.debug:\n print ('DEBUG: directory = ' + config.template_directory)\n lfile = os.path.join(config.template_directory, config.templates[appLab]['app'])\n if config.debug:\n print ('DEBUG: local file = ' + lfile)\n tree = ET.parse(lfile)\n txml = tree.getroot()\n\n # Find all CCD parameters\n cconfig = txml.find('configure_camera')\n pdict = {}\n for param in cconfig.findall('set_parameter'):\n pdict[param.attrib['ref']] = param.attrib\n\n # Set them. This is designed so that missing \n # parameters will cause exceptions to be raised.\n\n # X-binning factor\n pdict['X_BIN']['value'] = ccdpars.xbin.get()\n\n # Y-binning factor\n pdict['X_BIN']['value'] = ccdpars.ybin.get()\n\n # Number of exposures\n pdict['NUM_EXPS']['value'] = '-1' if ccdpars.number.value() == 0 else ccdpars.number.get()\n\n # LED level\n pdict['LED_FLSH']['value'] = ccdpars.led.get()\n\n # Avalanche or normal\n pdict['OUTPUT']['value'] = str(ccdpars.avalanche())\n\n # Avalanche gain\n pdict['HV_GAIN']['value'] = ccdpars.avgain.get()\n\n # Clear or not\n pdict['EN_CLR']['value'] = str(ccdpars.clear())\n\n # Dwell\n pdict['DWELL']['value'] = ccdpars.expose.get()\n\n # Readout speed\n pdict['SPEED']['value'] = '0' if ccdpars.readout == 'Slow' else '1' \\\n if ccdpars.readout == 'Medium' else '2'\n\n # Number of windows -- needed to set output parameters correctly\n nwin = ccdpars.nwin.value()\n\n # Load up enabled windows, null disabled windows\n for nw, win in ccdpars.wframe.wins:\n if nw < nwin:\n pdict['X' + str(nw+1) + '_START']['value'] = win.xstart.get()\n pdict['Y' + str(nw+1) + '_START']['value'] = win.ystart.get()\n pdict['X' + str(nw+1) + '_SIZE']['value'] = win.nx.get()\n pdict['Y' + str(nw+1) + '_SIZE']['value'] = win.ny.get()\n else:\n pdict['X' + str(nw+1) + '_START']['value'] = '1'\n pdict['Y' + str(nw+1) + '_START']['value'] = '1'\n pdict['X' + str(nw+1) + '_SIZE']['value'] = '0'\n pdict['Y' + str(nw+1) + '_SIZE']['value'] = '0'\n\n # Load the user parameters\n uconfig = txml.find('user')\n uconfig.set('target', userpars.target.get())\n uconfig.set('comment', userpars.comment.get())\n uconfig.set('ID', userpars.progid.get())\n uconfig.set('PI', userpars.pi.get())\n uconfig.set('Observers', userpars.observers.get())\n \n return txml", "def 
__init__(self):\n self.elementName = \"\"\n self.elementText = \"\"\n self.attrib = {}\n self.xml = \"\"", "def get_structure():\n\n root = Container()\n\n a = Model()\n root['a'] = a\n\n sub = Container()\n root['sub'] = sub\n\n b = Model()\n sub['b'] = b\n sub.attr = b\n\n return root", "def to_etree(self):\n\n # Top-level root block\n attr = {\n \"name\": self.name,\n \"instance\": self.instance,\n }\n\n if self.arch_id is not None:\n attr[\"architecture_id\"] = self.arch_id\n if self.netlist_id is not None:\n attr[\"atom_netlist_id\"] = self.netlist_id\n\n root = ET.Element(\"block\", attr)\n\n # Top-level ports\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n xml_ports = ET.Element(tag)\n xml_ports.text = \" \".join(self.ports[tag])\n root.append(xml_ports)\n\n # CLB blocks\n keys = self.blocks.keys()\n for key in keys:\n xml_block = self.blocks[key].to_etree()\n root.append(xml_block)\n\n return root", "def __init__(self, title, description=''):\r\n self.kml_doc = xml.dom.minidom.Document()\r\n kml = self.kml_doc.createElement('kml')\r\n kml.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\r\n self.kml_doc.appendChild(kml)\r\n document = self.kml_doc.createElement('Document')\r\n kml.appendChild(document)\r\n docName = self.kml_doc.createElement('name')\r\n document.appendChild(docName)\r\n docName_text = self.kml_doc.createTextNode(title)\r\n docName.appendChild(docName_text)\r\n docDesc = self.kml_doc.createElement('description')\r\n document.appendChild(docDesc)\r\n docDesc_text = self.kml_doc.createTextNode(description)\r\n docDesc.appendChild(docDesc_text)", "def write(self):\n temp_string = minidom.parseString(ET.tostring(self.root)).toprettyxml(encoding=\"UTF-8\")\n with open(self.xml_file, 'w') as f:\n f.write(temp_string)\n # f = open(self.xml_file, \"w\")\n # f.write(temp_string)\n # f.close()", "def writexml(file):\n OUTFILE=open(file,\"w\")\n doc = xml.dom.minidom.Document()\n\n # Create the <dec_reg_list> base element\n decl_reg_list = doc.createElement(\"decl_reg_list\")\n doc.appendChild(decl_reg_list)\n\n regname_old=\"\"\n rows.pop(0)\n for row in rows:\n (regdesc,regname,offset,default,regtype,expose_reg,depth,incsz,bitdesc,bitname,loc,bittype)= row\n if regname != regname_old:\n # Create the register element\n register = doc.createElement(\"register\")\n register.setAttribute(\"name\", regname)\n register.setAttribute(\"offset\", offset)\n if default != \"\" : register.setAttribute(\"default\", default)\n register.setAttribute(\"type\", regtype)\n if expose_reg == \"1\": register.setAttribute(\"usr\", expose_reg)\n if depth != \"\": register.setAttribute(\"size\", depth)\n if incsz != \"\": register.setAttribute(\"incsz\", incsz)\n text = doc.createTextNode(regdesc)\n register.appendChild(text)\n decl_reg_list.appendChild(register)\n \n # Create the field element\n if bitname != \"\":\n field = doc.createElement(\"field\")\n field.setAttribute(\"name\", bitname)\n if loc !=\"\": field.setAttribute(\"loc\", addcolon(loc))\n if bittype != \"\": field.setAttribute(\"type\", bittype)\n if bitdesc != \"\":\n text = doc.createTextNode(bitdesc)\n field.appendChild(text)\n register.appendChild(field)\n regname_old = regname\n\n\n # Print our newly created XML\n #print doc.toprettyxml(indent=\" \")\n #OUTFILE.write(doc.saveXML(decl_reg_list))\n OUTFILE.write(doc.toprettyxml(indent=\" \"))\n OUTFILE.close()", "def to_xml(self) -> str:\n # default name and stuff setup\n element_root, xml_tree = super()._add_basics()\n element_root = 
element_root.find('elementProp')\n element_root = element_root.find('collectionProp')\n for element in list(element_root):\n try:\n if element.attrib['name'] == 'influxdbUrl':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.influx_db_url:\n elem.text = self.influx_db_url\n elif element.attrib['name'] == 'application':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.application:\n elem.text = self.application\n elif element.attrib['name'] == 'measurement':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.measurement:\n elem.text = self.application\n elif element.attrib['name'] == 'summaryOnly':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value':\n elem.text = str(self.summary_only).lower()\n elif element.attrib['name'] == 'samplersRegex':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.samplers_regexp:\n elem.text = self.samplers_regexp\n elif element.attrib['name'] == 'percentiles':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.percentiles:\n elem.text = self.percentiles\n elif element.attrib['name'] == 'testTitle':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.test_title:\n elem.text = self.test_title\n elif element.attrib['name'] == 'eventTags':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.event_tags:\n elem.text = self.event_tags\n except Exception:\n raise Exception(f'Unable to render xml from {type(self).__class__}')\n return tree_to_str(xml_tree, hashtree=True)", "def add_output_metadata(root):\n jss_connection = JSSConnection.get()\n report_date = ET.SubElement(root, \"ReportDate\")\n report_date.text = datetime.datetime.strftime(datetime.datetime.now(),\n \"%Y%m%d-%H%M%S\")\n report_server = ET.SubElement(root, \"Server\")\n report_server.text = jss_connection.base_url\n api_user = ET.SubElement(root, \"APIUser\")\n api_user.text = jss_connection.user\n report_user = ET.SubElement(root, \"LocalUser\")\n report_user.text = os.getenv(\"USER\")\n spruce_version = ET.SubElement(root, \"SpruceVersion\")\n spruce_version.text = __version__\n python_jss_version = ET.SubElement(root, \"python-jssVersion\")\n python_jss_version.text = jss.__version__\n ET.SubElement(root, \"Removals\")", "def toXML(self, document, rootElement):\n\n\t\trigidRotorElement = document.createElement('rigidRotor', rootElement)\n\t\tlinear = 'yes' if self.linear else 'no'\n\t\tdocument.createAttribute('linear', rigidRotorElement, linear)\n\t\tdocument.createQuantity('frequencies', rigidRotorElement, self.frequencies, 'cm^-1')", "def createXMLFile(list, stock_symbol, market):\n \n stock = ET.Element(\"stock\")\n \n stock.set(\"source\", 'yahoo finance')\n exchange = ET.SubElement(stock, \"exchange\")\n exchange.set(\"market\", market)\n \n for s in list: \n \n if s.array[0] == 'Date' or list[0].array[0] != 'Date':\n continue\n dividend_date = ET.SubElement(exchange, \"dividend_date\")\n dividend_date.set(\"date\", s.array[0])\n \n price = ET.SubElement(dividend_date, \"price\")\n price.text = s.array[1]\n \n \n indent(stock)\n tree = ET.ElementTree(stock)\n \n tree.write(\"dividend_history.xml\", xml_declaration=True, encoding='utf-8', method=\"xml\")\n print 'xml created for ' + stock_symbol", "def _toxml_rec(self, root, obj=None, ns_cur=None):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n try:\n\n if 
(obj == None):\n obj = self._client.factory.create(root)\n\n ns = '{%s}' % self._get_element_ns(obj.__class__.__name__)\n if (ns != '{None}' and ns != ns_cur):\n doc = Element(ns + root)\n else:\n doc = Element(root)\n ns = ns_cur\n\n for key in obj.__keylist__:\n subelem = obj[key]\n\n if (subelem == None):\n SubElement(doc, key).text = '?'\n elif (subelem == [] or '[]' in subelem.__str__()):\n inner_doc = self._toxml_rec(key, None, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n else:\n el_type = self._get_element_type(\n subelem.__class__.__name__)\n if (el_type == 'Simple'):\n SubElement(doc, key).text = '?'\n elif (el_type == 'Complex'):\n inner_doc = self._toxml_rec(key, subelem, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n\n return doc\n\n except TypeNotFound:\n return None", "def to_etree(self):\n\n # Base block element\n attrib = {\n \"name\": self.name,\n \"instance\": self.instance,\n }\n if not self.is_leaf:\n attrib[\"mode\"] = self.mode if self.mode is not None else \"default\"\n\n elem = ET.Element(\"block\", attrib)\n\n # If this is an \"open\" block then skip the remaining tags\n if self.name == \"open\":\n return elem\n\n # Attributes / parameters\n if self.is_leaf:\n for tag, data in zip([\"attributes\", \"parameters\"], [self.attributes, self.parameters]):\n xml_list = ET.Element(tag)\n\n sub_tag = tag[:-1]\n for key, value in data.items():\n xml_item = ET.Element(sub_tag, {\"name\": key})\n xml_item.text = value\n xml_list.append(xml_item)\n\n elem.append(xml_list)\n\n # Ports\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n xml_ports = ET.Element(tag)\n port_type = tag[:-1]\n\n keys = self.ports.keys()\n for key in keys:\n port = self.ports[key]\n if port.type == port_type:\n # Encode port\n xml_port = port.to_etree()\n xml_ports.append(xml_port)\n\n # Rotation map\n if port.rotation_map:\n # Encode\n rotation = []\n for i in range(port.width):\n rotation.append(str(port.rotation_map.get(i, \"open\")))\n\n # Make an element\n xml_rotation_map = ET.Element(\"port_rotation_map\", {\"name\": port.name})\n xml_rotation_map.text = \" \".join(rotation)\n xml_ports.append(xml_rotation_map)\n\n elem.append(xml_ports)\n\n # Recurse\n keys = self.blocks.keys()\n for key in keys:\n xml_block = self.blocks[key].to_etree()\n elem.append(xml_block)\n\n return elem", "def toXML(self, document, rootElement):\n\t\thinderedRotorElement = document.createElement('hinderedRotor', rootElement)\n\t\tdocument.createQuantity('frequency', hinderedRotorElement, self.frequency, 'cm^-1')\n\t\tdocument.createQuantity('barrier', hinderedRotorElement, self.barrier, 'cm^-1')\n\t\tdocument.createTextElement('degeneracy', hinderedRotorElement, str(self.degeneracy))", "def build(self):\n\n children = filter( lambda n: n.nodeType == n.ELEMENT_NODE, self.dom.childNodes[0].childNodes)\n for node in children:\n try:\n s = self.declare(node)\n except DeclareError as de:\n # is it the document info block\n if de.nodeType != \"docInfo\":\n raise de\n # has it got children, this is the only empty info block we allow\n if not node.hasChildNodes():\n continue\n # does it contain embedded xml, i.e. 
HTML tags\n if node.getAttribute(\"type\") == \"xml\":\n self.bumfText = \"\".join(map(lambda n: n.toxml(), node.childNodes))\n else:\n self.bumfText = node.childNodes[0].data\n else:\n self.data.append(s)\n\n # finally fix the typedefs (a bit like aliasing really)\n self.fixupTypedefs()", "def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False", "def xml(self, time_taken, out, err):\n test_suite = ET.Element('testsuite')\n test_suite.set('errors', str(len(self.errors)))\n test_suite.set('failures', str(len(self.failures)))\n test_suite.set('name', self._test_name)\n test_suite.set('tests', str(self.testsRun))\n test_suite.set('time', '%.3f' % time_taken)\n for info in self._tests:\n test_suite.append(info.xml())\n system_out = ET.SubElement(test_suite, 'system-out')\n system_out.text = cdata(self.filter_nonprintable_text(out))\n system_err = ET.SubElement(test_suite, 'system-err')\n system_err.text = cdata(self.filter_nonprintable_text(err))\n return ET.ElementTree(test_suite)", "def generate_xml(self, provisioning):\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n return self.provisioning2xml(provisioning)", "def create_xml_server(self, server, dev_list, server_metadata={}):\n \n #get if operating system is Windows \n windows_os = 
False\n os_type = server_metadata.get('os_type', None)\n if os_type == None and 'metadata' in dev_list[0]:\n os_type = dev_list[0]['metadata'].get('os_type', None)\n if os_type != None and os_type.lower() == \"windows\":\n windows_os = True\n #get type of hard disk bus \n bus_ide = True if windows_os else False \n bus = server_metadata.get('bus', None)\n if bus == None and 'metadata' in dev_list[0]:\n bus = dev_list[0]['metadata'].get('bus', None)\n if bus != None:\n bus_ide = True if bus=='ide' else False\n \n self.xml_level = 0\n\n text = \"<domain type='kvm'>\"\n #get topology\n topo = server_metadata.get('topology', None)\n if topo == None and 'metadata' in dev_list[0]:\n topo = dev_list[0]['metadata'].get('topology', None)\n #name\n name = server.get('name','') + \"_\" + server['uuid']\n name = name[:58] #qemu impose a length limit of 59 chars or not start. Using 58\n text += self.inc_tab() + \"<name>\" + name+ \"</name>\"\n #uuid\n text += self.tab() + \"<uuid>\" + server['uuid'] + \"</uuid>\" \n \n numa={}\n if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:\n numa = server['extended']['numas'][0]\n #memory\n use_huge = False\n memory = int(numa.get('memory',0))*1024*1024 #in KiB\n if memory==0:\n memory = int(server['ram'])*1024;\n else:\n if not self.develop_mode:\n use_huge = True\n if memory==0:\n return -1, 'No memory assigned to instance'\n memory = str(memory)\n text += self.tab() + \"<memory unit='KiB'>\" +memory+\"</memory>\" \n text += self.tab() + \"<currentMemory unit='KiB'>\" +memory+ \"</currentMemory>\"\n if use_huge:\n text += self.tab()+'<memoryBacking>'+ \\\n self.inc_tab() + '<hugepages/>'+ \\\n self.dec_tab()+ '</memoryBacking>'\n\n #cpu\n use_cpu_pinning=False\n vcpus = int(server.get(\"vcpus\",0))\n cpu_pinning = []\n if 'cores-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['cores-source'])):\n cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )\n vcpus += 1\n if 'threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['threads-source'])):\n cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )\n vcpus += 1\n if 'paired-threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['paired-threads-source'])):\n cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )\n cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )\n vcpus += 2\n \n if use_cpu_pinning and not self.develop_mode:\n text += self.tab()+\"<vcpu placement='static'>\" +str(len(cpu_pinning)) +\"</vcpu>\" + \\\n self.tab()+'<cputune>'\n self.xml_level += 1\n for i in range(0, len(cpu_pinning)):\n text += self.tab() + \"<vcpupin vcpu='\" +str(cpu_pinning[i][0])+ \"' cpuset='\" +str(cpu_pinning[i][1]) +\"'/>\"\n text += self.dec_tab()+'</cputune>'+ \\\n self.tab() + '<numatune>' +\\\n self.inc_tab() + \"<memory mode='strict' nodeset='\" +str(numa['source'])+ \"'/>\" +\\\n self.dec_tab() + '</numatune>'\n else:\n if vcpus==0:\n return -1, \"Instance without number of cpus\"\n text += self.tab()+\"<vcpu>\" + str(vcpus) + \"</vcpu>\"\n\n #boot\n boot_cdrom = False\n for dev in dev_list:\n if dev['type']=='cdrom' :\n boot_cdrom = True\n break\n text += self.tab()+ '<os>' + \\\n self.inc_tab() + \"<type arch='x86_64' machine='pc'>hvm</type>\"\n if boot_cdrom:\n text += self.tab() + \"<boot dev='cdrom'/>\" \n text += self.tab() + \"<boot dev='hd'/>\" + 
\\\n self.dec_tab()+'</os>'\n #features\n text += self.tab()+'<features>'+\\\n self.inc_tab()+'<acpi/>' +\\\n self.tab()+'<apic/>' +\\\n self.tab()+'<pae/>'+ \\\n self.dec_tab() +'</features>'\n if windows_os or topo==\"oneSocket\":\n text += self.tab() + \"<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>\"% vcpus\n else:\n text += self.tab() + \"<cpu mode='host-model'></cpu>\"\n text += self.tab() + \"<clock offset='utc'/>\" +\\\n self.tab() + \"<on_poweroff>preserve</on_poweroff>\" + \\\n self.tab() + \"<on_reboot>restart</on_reboot>\" + \\\n self.tab() + \"<on_crash>restart</on_crash>\"\n text += self.tab() + \"<devices>\" + \\\n self.inc_tab() + \"<emulator>/usr/libexec/qemu-kvm</emulator>\" + \\\n self.tab() + \"<serial type='pty'>\" +\\\n self.inc_tab() + \"<target port='0'/>\" + \\\n self.dec_tab() + \"</serial>\" +\\\n self.tab() + \"<console type='pty'>\" + \\\n self.inc_tab()+ \"<target type='serial' port='0'/>\" + \\\n self.dec_tab()+'</console>'\n if windows_os:\n text += self.tab() + \"<controller type='usb' index='0'/>\" + \\\n self.tab() + \"<controller type='ide' index='0'/>\" + \\\n self.tab() + \"<input type='mouse' bus='ps2'/>\" + \\\n self.tab() + \"<sound model='ich6'/>\" + \\\n self.tab() + \"<video>\" + \\\n self.inc_tab() + \"<model type='cirrus' vram='9216' heads='1'/>\" + \\\n self.dec_tab() + \"</video>\" + \\\n self.tab() + \"<memballoon model='virtio'/>\" + \\\n self.tab() + \"<input type='tablet' bus='usb'/>\" #TODO revisar\n\n#> self.tab()+'<alias name=\\'hostdev0\\'/>\\n' +\\\n#> self.dec_tab()+'</hostdev>\\n' +\\\n#> self.tab()+'<input type=\\'tablet\\' bus=\\'usb\\'/>\\n'\n if windows_os:\n text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes'/>\"\n else:\n #If image contains 'GRAPH' include graphics\n #if 'GRAPH' in image:\n text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>\" +\\\n self.inc_tab() + \"<listen type='address' address='0.0.0.0'/>\" +\\\n self.dec_tab() + \"</graphics>\"\n\n vd_index = 'a'\n for dev in dev_list:\n bus_ide_dev = bus_ide\n if dev['type']=='cdrom' or dev['type']=='disk':\n if dev['type']=='cdrom':\n bus_ide_dev = True\n text += self.tab() + \"<disk type='file' device='\"+dev['type']+\"'>\"\n if 'file format' in dev:\n text += self.inc_tab() + \"<driver name='qemu' type='\" +dev['file format']+ \"' cache='none'/>\"\n if 'source file' in dev:\n text += self.tab() + \"<source file='\" +dev['source file']+ \"'/>\"\n #elif v['type'] == 'block':\n # text += self.tab() + \"<source dev='\" + v['source'] + \"'/>\"\n #else:\n # return -1, 'Unknown disk type ' + v['type']\n vpci = dev.get('vpci',None)\n if vpci == None:\n vpci = dev['metadata'].get('vpci',None)\n text += self.pci2xml(vpci)\n \n if bus_ide_dev:\n text += self.tab() + \"<target dev='hd\" +vd_index+ \"' bus='ide'/>\" #TODO allows several type of disks\n else:\n text += self.tab() + \"<target dev='vd\" +vd_index+ \"' bus='virtio'/>\" \n text += self.dec_tab() + '</disk>'\n vd_index = chr(ord(vd_index)+1)\n elif dev['type']=='xml':\n dev_text = dev['xml']\n if 'vpci' in dev:\n dev_text = dev_text.replace('__vpci__', dev['vpci'])\n if 'source file' in dev:\n dev_text = dev_text.replace('__file__', dev['source file'])\n if 'file format' in dev:\n dev_text = dev_text.replace('__format__', dev['source file'])\n if '__dev__' in dev_text:\n dev_text = dev_text.replace('__dev__', vd_index)\n vd_index = chr(ord(vd_index)+1)\n text += dev_text\n else:\n return -1, 'Unknown device type ' + dev['type']\n\n 
net_nb=0\n bridge_interfaces = server.get('networks', [])\n for v in bridge_interfaces:\n #Get the brifge name\n self.db_lock.acquire()\n result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )\n self.db_lock.release()\n if result <= 0:\n print \"create_xml_server ERROR getting nets\",result, content\n return -1, content\n #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM\n #I know it is not secure \n #for v in sorted(desc['network interfaces'].itervalues()):\n model = v.get(\"model\", None)\n if content[0]['provider']=='default':\n text += self.tab() + \"<interface type='network'>\" + \\\n self.inc_tab() + \"<source network='\" +content[0]['provider']+ \"'/>\"\n elif content[0]['provider'][0:7]=='macvtap':\n text += self.tab()+\"<interface type='direct'>\" + \\\n self.inc_tab() + \"<source dev='\" + self.get_local_iface_name(content[0]['provider'][8:]) + \"' mode='bridge'/>\" + \\\n self.tab() + \"<target dev='macvtap0'/>\"\n if windows_os:\n text += self.tab() + \"<alias name='net\" + str(net_nb) + \"'/>\"\n elif model==None:\n model = \"virtio\"\n elif content[0]['provider'][0:6]=='bridge':\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.get_local_iface_name(content[0]['provider'][7:])+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n elif model==None:\n model = \"virtio\"\n else:\n return -1, 'Unknown Bridge net provider ' + content[0]['provider']\n if model!=None:\n text += self.tab() + \"<model type='\" +model+ \"'/>\"\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n \n net_nb += 1\n\n interfaces = numa.get('interfaces', [])\n\n net_nb=0\n for v in interfaces:\n if self.develop_mode: #map these interfaces to bridges\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.develop_bridge_iface+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n else:\n text += self.tab() + \"<model type='e1000'/>\" #e1000 is more probable to be supported than 'virtio'\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n continue\n \n if v['dedicated'] == 'yes': #passthrought\n text += self.tab() + \"<hostdev mode='subsystem' type='pci' managed='yes'>\" + \\\n self.inc_tab() + \"<source>\"\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</hostdev>'\n net_nb += 1\n else: #sriov_interfaces\n #skip not connected interfaces\n if v.get(\"net_id\") == None:\n continue\n text += self.tab() + \"<interface type='hostdev' managed='yes'>\"\n self.inc_tab()\n if v.get('mac_address', None) != None:\n text+= self.tab() + \"<mac address='\" +v['mac_address']+ \"'/>\"\n text+= self.tab()+'<source>'\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n if v.get('vlan',None) != None:\n text += self.tab() + 
\"<vlan> <tag id='\" + str(v['vlan']) + \"'/> </vlan>\"\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</interface>'\n\n \n text += self.dec_tab()+'</devices>'+\\\n self.dec_tab()+'</domain>'\n return 0, text", "def toxml(self) :\n\t\treturn self.doc.toxml()", "def toXMLElement(self):\n # NOTE Subclasses should call Component.toXMLElement(self) to obtain\n # base node and then add further attributes and sub-elements\n return ET.Element(self.__class__.__name__)", "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def init_rss_dom_structure(url):\n doc = xml.dom.minidom.Document()\n root_elmt = doc.createElement('rss')\n root_elmt.setAttribute('version', '2.0')\n doc.appendChild(root_elmt)\n\n channel_elmt = doc.createElement('channel')\n root_elmt.appendChild(channel_elmt)\n\n ttl_elmt = doc.createElement('ttl')\n ttl_txt = doc.createTextNode(str(24*60)) # One day TTL (24 * 60 minutes)\n ttl_elmt.appendChild(ttl_txt)\n channel_elmt.appendChild(ttl_elmt)\n\n title_elmt = doc.createElement('title')\n title_txt = doc.createTextNode('ATBTCT RSS feed for the CT log at {}'.format(url))\n title_elmt.appendChild(title_txt)\n channel_elmt.appendChild(title_elmt)\n\n desc_elmt = doc.createElement('description')\n desc_txt = doc.createTextNode(\n 'References the list of torrents that one can add to its BitTorrent client in order to get an archive of the CT log at {}.'.format(url)\n )\n desc_elmt.appendChild(desc_txt)\n channel_elmt.appendChild(desc_elmt)\n\n link_elmt = doc.createElement('link')\n link_txt = doc.createTextNode('https://github.com/X-Cli/ATBTCT')\n link_elmt.appendChild(link_txt)\n channel_elmt.appendChild(link_elmt)\n\n return doc, channel_elmt", "def xml():\n response = make_response(render_template(\"sample.xml\"))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def site2nrml(model, params_dict): \n \"\"\"\n # Some XML definitions\n NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'\n GML_NAMESPACE = 'http://www.opengis.net/gml'\n SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE} \n gml_ns = SERIALIZE_NS_MAP['gml']\n \"\"\"\n \n # Head matter \n root = etree.Element(_tag='nrml', nsmap={'gml': 'http://www.opengis.net/gml'})\n root.set('xmlns', 'http://openquake.org/xmlns/nrml/0.4')\n root.append(etree.Comment('%s' % '%s site model' %(model)))\n \n\n # Define Site Model Name \n sMod = etree.SubElement(root, \"siteModel\")\n sMod.set('name', model + ' Site Model')\n \n # Define sub element\n \n for key in params_dict:\n \n site = etree.SubElement(sMod, \"site\")\n site.set('lon', '%s' % key[0])\n site.set('lat', '%s' % key[1])\n site.set('vs30', '%s' % params_dict[key][0])\n site.set('vs30Type', '%s' % 'inferred')\n site.set('z1pt0', '%s' % '%3.3f' % float(params_dict[key][1]))\n site.set('z2pt5', '%s' % '%3.3f' % float(params_dict[key][2]))\n \n #print(getMinMax(params_dict))\n \n # Form tree and write to xml\n root_tree = etree.ElementTree(root)\n outFile = open((out_directory + '/' + out_filename), 'wb')\n root_tree.write(outFile, encoding=\"utf-8\", xml_declaration=True, pretty_print=True)", "def ToXMLElement(self):\n root_element = xml.etree.ElementTree.Element('dspl')\n\n if self.namespace:\n root_element.set('targetNamespace', self.namespace)\n\n # Add namespace and imports\n root_element.set('xmlns',\n 
'http://schemas.google.com/dspl/2010')\n\n for import_obj in self.imports:\n root_element.set('xmlns:%s' % import_obj.namespace_id,\n import_obj.namespace_url)\n root_element.append(import_obj.ToXMLElement())\n\n # Basic dataset information\n dataset_info = xml.etree.ElementTree.Element('info')\n\n dataset_name = xml.etree.ElementTree.Element('name')\n dataset_name.append(_ValueOrPlaceHolder(self.name, 'DATASET NAME'))\n dataset_info.append(dataset_name)\n\n dataset_description = xml.etree.ElementTree.Element('description')\n dataset_description.append(\n _ValueOrPlaceHolder(self.description, 'DATASET DESCRIPTION'))\n dataset_info.append(dataset_description)\n\n dataset_url = xml.etree.ElementTree.Element('url')\n dataset_url.append(\n _ValueOrPlaceHolder(self.url, 'DATASET URL'))\n dataset_info.append(dataset_url)\n\n root_element.append(dataset_info)\n\n # Provider information\n provider_info = xml.etree.ElementTree.Element('provider')\n\n provider_name = xml.etree.ElementTree.Element('name')\n provider_name.append(\n _ValueOrPlaceHolder(self.provider_name, 'PROVIDER NAME'))\n provider_info.append(provider_name)\n\n provider_url = xml.etree.ElementTree.Element('url')\n provider_url.append(\n _ValueOrPlaceHolder(self.provider_url, 'PROVIDER URL'))\n provider_info.append(provider_url)\n\n root_element.append(provider_info)\n\n # Add topic info\n if self.topics:\n topic_elements = xml.etree.ElementTree.Element('topics')\n\n for topic in self.topics:\n topic_elements.append(topic.ToXMLElement())\n\n root_element.append(topic_elements)\n\n # Add concept info\n concept_elements = xml.etree.ElementTree.Element('concepts')\n\n for concept in self.concepts:\n if not concept.concept_reference:\n concept_elements.append(concept.ToXMLElement())\n\n root_element.append(concept_elements)\n\n # Add slices\n slice_elements = xml.etree.ElementTree.Element('slices')\n\n for data_slice in self.slices:\n slice_elements.append(data_slice.ToXMLElement(self))\n\n root_element.append(slice_elements)\n\n # Add table info\n table_elements = xml.etree.ElementTree.Element('tables')\n\n for table in self.tables:\n table_elements.append(table.ToXMLElement())\n\n root_element.append(table_elements)\n\n return root_element", "def export_to_xml(self, block, xmlfile):\n root = etree.Element(\"unknown_root\", nsmap=XML_NAMESPACES)\n tree = etree.ElementTree(root)\n block.add_xml_to_node(root)\n # write asides as children\n for aside in self.get_asides(block):\n if aside.needs_serialization():\n aside_node = etree.Element(\"unknown_root\", nsmap=XML_NAMESPACES)\n aside.add_xml_to_node(aside_node)\n block.append(aside_node)\n tree.write(xmlfile, xml_declaration=True, pretty_print=True, encoding='utf-8')", "def test_basic_xml(self):\n j2k = Jp2k(self.j2kfile)\n\n self.jp2h.box = [self.ihdr, self.colr]\n\n doc = ET.parse(BytesIO(b'<?xml version=\"1.0\"?><data>0</data>'))\n xmlb = glymur.jp2box.XMLBox(xml=doc)\n self.assertEqual(ET.tostring(xmlb.xml.getroot()),\n b'<data>0</data>')\n\n boxes = [self.jp2b, self.ftyp, self.jp2h, xmlb, self.jp2c]\n\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as tfile:\n j2k.wrap(tfile.name, boxes=boxes)\n jp2 = Jp2k(tfile.name)\n self.assertEqual(jp2.box[3].box_id, 'xml ')\n self.assertEqual(ET.tostring(jp2.box[3].xml.getroot()),\n b'<data>0</data>')", "def xml(self):\n return parse_xml(self, tab=\"\\t\", id=self.id or \"\")", "def create_XML(directives, gui): \n # unpack the directives\n commands = directives.command_list\n Delay = directives.delay_time\n Ascii_delay = 
directives.ascii_time \n addr = directives.addr\n \n # Start XML\n aardvark = ET.Element('aardvark')\n \n # starup comment for historical reasons\n aardvark.append(ET.Comment('Configuration (Need pullups, ' + \n 'not sure why...)')) \n \n # Configuration Element\n config_attributes = {'i2c': str(int(pySCPI_aardvark.I2C)),\n 'spi': str(int(pySCPI_aardvark.SPI)),\n 'gpio': str(int(pySCPI_aardvark.GPIO)),\n 'pullups': str(int(pySCPI_aardvark.Pullups))}\n \n ET.SubElement(aardvark, 'configure', config_attributes)\n \n # Bitrate\n rate_attributes = {'khz': str(pySCPI_aardvark.Bitrate)}\n \n ET.SubElement(aardvark, 'i2c_bitrate', rate_attributes)\n \n # Start I2C\n ET.SubElement(aardvark, 'i2c_free_bus')\n \n # delay attributes\n delay_attributes = {'ms': str(Delay)} \n ascii_delay_attributes = {'ms': str(Ascii_delay)} \n \n # delay\n ET.SubElement(aardvark, 'sleep', delay_attributes) \n \n # iterate through commands\n for command in commands: \n \n if pySCPI_config.is_config(command):\n # add the configuration to the XML\n addr = update_XML(command, addr, aardvark)\n \n elif pySCPI_config.is_valid_raw(command):\n # it is a valid raw command so comment the command\n aardvark.append(ET.Comment(command))\n \n # split the command up\n raw_list = command[:-1].split(' ')\n raw_addr = '0x' + raw_list[1][2:-1]\n \n # determine the type of raw command it is\n if pySCPI_config.is_raw_write(command):\n write_attributes = {'addr': raw_addr,\n 'count': str(len(raw_list)-1),\n 'radix': str(pySCPI_aardvark.radix)}\n raw = ET.SubElement(aardvark, 'i2c_write',\n write_attributes)\n \n # add hexidecimal null terminated command as \n # text to the write element\n raw.text = ' '.join(\"{:02x}\".format(int(c, 16)) for \\\n c in raw_list[2:]) + ' 0a'\n \n else:\n read_attributes = {'addr': raw_addr,\n 'count': raw_list[2],\n 'radix': str(pySCPI_aardvark.radix)} \n \n ET.SubElement(aardvark, 'i2c_read', \n read_attributes) \n # end if\n \n # intermessage delay\n ET.SubElement(aardvark, 'sleep', delay_attributes) \n \n else:\n # this is a regular command so comment the SCPI command\n aardvark.append(ET.Comment(command))\n \n # define attributes for write element\n write_attributes = {'addr': addr,\n 'count': str(len(command)+1),\n 'radix': str(pySCPI_aardvark.radix)}\n \n # create write element if it is not a comment\n if not command.startswith('#'):\n scpi = ET.SubElement(aardvark,'i2c_write',write_attributes)\n \n # add hexidecimal null terminated command as \n # text to the write element\n scpi.text = ' '.join(\"{:02x}\".format(ord(c)) for \\\n c in command) + ' 0a' \n # end if\n \n \n \n if ('TEL?' 
in command) and not command.startswith('#'):\n # Read command was issued so a read needs to be performed\n \n if command.endswith('ascii'):\n # leave a longer delay for ascii commands\n ET.SubElement(aardvark, 'sleep', \n ascii_delay_attributes)\n \n else:\n # regular delay\n ET.SubElement(aardvark, 'sleep', delay_attributes)\n # end if\n \n # extract length from command\n command_len = pySCPI_formatting.read_length(command, gui) \n \n # define attributes for read element\n read_attributes = {'addr': addr,\n 'count': str(command_len),\n 'radix': str(pySCPI_aardvark.radix)} \n \n # create the read element\n ET.SubElement(aardvark, 'i2c_read', read_attributes) \n # end if\n \n # delay\n ET.SubElement(aardvark, 'sleep', delay_attributes) \n # end if\n\n #end for \n \n # beautify the xml\n file_string = beautify_xml(aardvark)\n \n # open window for saving the file\n file_opt = options = {}\n options['defaultextension'] = '.xml'\n options['filetypes'] = [('xml files', '.xml')]\n options['initialdir'] = os.getcwd() + '\\\\xml_files'\n options['initialfile'] = 'aardvark_script.xml'\n options['title'] = 'Save .xml file as:' \n \n # get the file name from the user\n filename = TKFD.asksaveasfilename(**file_opt)\n \n # see if the user selected a file or not\n if (filename != ''): \n # a file was selected so open file for writing\n \n if pySCPI_config.file_is_free(filename): \n myfile = open(filename, 'w+')\n \n # write file\n myfile.write(file_string)\n myfile.write('\\n')\n \n # close file\n myfile.close() \n \n print 'XML file \\''+ filename.split('/')[-1]+'\\' written'\n \n else:\n print '*** Requested XML file is open in another program ***'\n \n else: \n # no file was selected\n print '*** No XML file written ***'\n # end if\n \n return filename", "def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects", "def toxml(self, root, outfile=None, envelope=False):\n\n try:\n\n self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(\n 'datagen_xmlgen_write_sample'), self._mh.fromhere())\n ev = event.Event('xmlgen_before_write', root, outfile, envelope)\n if (self._mh.fire_event(ev) > 0):\n root = ev.argv(0)\n outfile = 
ev.argv(1)\n envelope = ev.argv(2)\n\n if (ev.will_run_default()):\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n if (envelope):\n ns = '{%s}' % 'http://schemas.xmlsoap.org/soap/envelope/'\n doc = Element(ns + 'Envelope')\n SubElement(doc, 'Header')\n body = SubElement(doc, 'Body')\n body.append(self._toxml_rec(root))\n else:\n doc = self._toxml_rec(root)\n\n outfile = 'sample.xml' if (outfile == None) else outfile\n with open(outfile, 'w') as f:\n f.write(tostring(\n doc, encoding='UTF-8', xml_declaration=True, pretty_print=True).decode())\n\n self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(\n 'datagen_xmlgen_sample_written', outfile), self._mh.fromhere())\n ev = event.Event('xmlgen_after_write')\n self._mh.fire_event(ev)\n\n return True\n\n except (Exception, ValueError) as ex:\n self._mh.demsg(\n 'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())\n return False", "def __make_tree(self, wd, root=\"d1\", create=True):\n d1 = \"%s/%s\" % (wd, root)\n t1 = FSTree(d1)\n d2 = \"%s/d2\" % d1\n t2 = t1.add(d2)\n if create:\n hdfs.mkdir(d2)\n for t, d, bn in ((t1, d1, \"f1\"), (t2, d2, \"f2\")):\n f = \"%s/%s\" % (d, bn)\n if create:\n hdfs.dump(self.data, f, mode=\"wb\")\n t.add(f, 0)\n return t1", "def gen_paula_etree(paula_id):\n E = ElementMaker(nsmap=NSMAP)\n tree = E('paula', version='1.1')\n tree.append(E('header', paula_id=paula_id))\n return E, tree", "def build_mocap_root():\n\n if not mc.objExists('skel_GRP'):\n raise RuntimeError('Skel_GRP not found!')\n\n if mc.objExists('guides_REF'):\n mc.delete('guides_REF')\n\n jnts = utils.get_children('skel_GRP', ad=1)\n mc.rename('skel_GRP', 'joints_GRP')\n mc.createNode('transform', n='rig_GRP')\n mc.parent('joints_GRP', 'rig_GRP')\n\n # create sets\n sets = ['rig_GRP']\n sets.append(create_cache_set())\n sets.append(mc.sets(jnts, n='bindJoints_SEL'))\n sets.append(mc.sets(jnts, n='control_SEL'))\n sets.append(mc.sets(jnts, n='engine_SEL'))\n\n mc.sets(sets, n='rig_SEL')", "def test_append_to_root():\n result = parse_xml(\"<lol><first>text 1</first><first>text 2</first></lol>\")\n expected = {'lol': {'first': ['text 1', 'text 2']}}\n\n assert_equals(result, expected)", "def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if 
node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)", "def makeNewXml(self):\n new_entry = self.begin_entry + self.begin_ul\n for i in self.commit_msgs:\n i = escape(i)\n li = self.begin_li + i + self.end_li\n new_entry = new_entry + li\n new_entry = new_entry + self.end_ul + self.end_entry\n self.new_xml = atom.core.XmlElementFromString(new_entry)", "def buildnemxml(self):\n for n in sorted(self._objs.keys()):\n emanenode = self._objs[n]\n emanenode.buildnemxmlfiles(self)", "def to_xml(self):\n \n root = ET.Element(\"Document\")\n root.set('xmlns',\"urn:iso:std:iso:20022:tech:xsd:pacs.008.001.02\")\n root_fito = ET.SubElement(root, \"FIToFICstmrCdtTrf\")\n \n self.xml_header(root_fito)\n self.xml_transaction(root_fito)\n\n ET.ElementTree(root)\n \n return ET.tostring(root,encoding='utf-8',xml_declaration=True).decode('utf-8')", "def _tree_to_xml(self, tree):\n\n body = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n\n return body + self._element_to_xml(tree)", "def _tree_to_xml(self, tree):\n\n body = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n\n return body + self._element_to_xml(tree)", "def toXML(self, document, rootElement):\n\t\tharmonicOscillatorElement = document.createElement('harmonicOscillator', rootElement)\n\t\tdocument.createQuantity('frequency', harmonicOscillatorElement, self.frequency, 'cm^-1')\n\t\tdocument.createTextElement('degeneracy', harmonicOscillatorElement, str(self.degeneracy))", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace(\"\\n\", os.linesep).encode())", "def create(cls, xml):\n raise Exception('Not Implemented Yet')", "def _generate_xml(self, body, destn_dir, nodes=True):\n fn = ''.join([random.choice(string.ascii_letters) for _ in range(12)])\n fn += '.xml'\n\n _dir = os.path.dirname(os.path.abspath(__file__))\n _tmpl = 'multi_node.template' if nodes else 'single_node.template'\n _env = Environment(autoescape=False,\n loader=FileSystemLoader(_dir),\n trim_blocks=False)\n\n with open(fn, 'w+') as f:\n o = _env.get_template(_tmpl).render(body)\n f.write(o)\n\n _d = destn_dir + '/' + fn\n self._remote_copy(fn, _d)\n # Remove the XML file created locally\n os.remove(fn)\n\n return _d", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def _generateCoordinatesXML(self, node):\n\n self._setTagList(self, node, ['email',])\n # Phones\n phone_tag_list = ['phone', 'cellphone']\n phone_list = []\n for tag in phone_tag_list:\n value = getattr(self, \"%s\" %(tag), None)\n if value:\n phone_list.append(value)\n phone_list.sort()\n for phone in phone_list:\n element = etree.SubElement(node, 'phone')\n element.text = phone\n\n # Fax\n value = getattr(self, \"fax\", None)\n if value:\n element = etree.SubElement(node, 'fax')\n element.text = value\n\n # Address\n address_tag_list = ['street', 'zip', 'city', 'country']\n address = None\n for address_tag in address_tag_list:\n value = getattr(self, \"%s\" %(address_tag), None)\n if value:\n if address is None:\n address = etree.SubElement(node, 'address')\n element = etree.SubElement(address, 
address_tag)\n element.text = value", "def create_xml_atlas(lfiles, foxml, oid=\"face\"):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n for i, fn in enumerate(lfiles):\n e = doc.createElement('subject')\n e.setAttribute('id', \"subj{}\".format(i))\n\n v = doc.createElement('visit')\n v.setAttribute('id', \"experiment\")\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', oid)\n\n t = doc.createTextNode(os.path.abspath(fn))\n\n f.appendChild(t)\n v.appendChild(f)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())", "def createXtree( self, position, level=_ROOT, parent=\"\" ):\n queue = self.__tree[position].fpointer\n mrmlId = self.__tree[position].identifier\n\n output = ' ' * 8 + mrmlId + ' = new X.mesh();\\n'\n\n if not level == _ROOT:\n\n n = slicer.mrmlScene.GetNodeByID( mrmlId )\n if n.IsA( 'vtkMRMLModelNode' ):\n\n # grab some properties\n s = n.GetStorageNode()\n if not s:\n # error\n raise Exception( 'Scene not saved!' )\n\n file = s.GetFileName()\n if not file:\n # error\n raise Exception( 'Scene not saved!' )\n\n d = n.GetDisplayNode()\n color = str( list(d.GetColor()) )\n opacity = str( d.GetOpacity() )\n visible = str( bool( d.GetVisibility() ) ).lower()\n\n if self.__copyFiles:\n fileName = os.path.split( file )[1]\n shutil.copy( file, os.path.join( self.__outputDir, fileName ) )\n \n file = os.path.split( file )[1]\n\n output += ' ' * 8 + mrmlId + '.file = \"' + file + '\";\\n'\n output += ' ' * 8 + mrmlId + '.color = ' + color + ';\\n'\n output += ' ' * 8 + mrmlId + '.opacity = ' + opacity + ';\\n'\n output += ' ' * 8 + mrmlId + '.visible = ' + visible + ';\\n'\n\n if self.__captionMode == 1:\n # From Model Name\n output += ' ' * 8 + mrmlId + '.caption = \"' + n.GetName() + '\";\\n'\n elif self.__captionMode == 2:\n # From Parent\n parentNode = slicer.util.getNode( parent )\n if parentNode:\n output += ' ' * 8 + mrmlId + '.caption = \"' + parentNode.GetName() + '\";\\n'\n\n output += ' ' * 8 + parent + '.children.push(' + mrmlId + ');\\n\\n'\n\n level += 1\n for element in queue:\n output += self.createXtree( element, level, mrmlId ) # recursive call\n\n return output", "def create_xml_file(cur, body_json, country_json):\n print(\"Creating XML file...\")\n xml_state = ET.Element(body_json['state'])\n xml_year = ET.SubElement(xml_state, 'y' + body_json['year'])\n xml_genre = ET.SubElement(xml_year, body_json['genre'])\n print(\"Going over all the albums that were baught in the state '%s' \"\n \"In the year '%s', in the genre '%s'\" %\n (body_json['state'], body_json['year'], body_json['genre']))\n for album in country_json[body_json['state']]:\n cur.execute('SELECT '\n 'SUM(invoice_items.quantity) FROM tracks '\n 'INNER JOIN albums ON albums.albumid = tracks.albumid '\n 'INNER JOIN genres ON genres.genreid = tracks.genreid '\n 'INNER JOIN invoice_items ON invoice_items.trackid = tracks.trackid '\n 'INNER JOIN invoices ON invoices.invoiceid = invoice_items.invoiceid '\n 'WHERE albums.Title=\"%s\" AND genres.name=\"%s\" AND '\n 'invoices.BillingCountry = \"%s\" AND strftime(\"%%Y\", InvoiceDate) = \"%s\";' %\n (album, body_json['genre'], body_json['state'], body_json['year']))\n data = cur.fetchall()\n if data[0][0]:\n ET.SubElement(xml_genre, album).text = str(data[0][0])\n else:\n ET.SubElement(xml_genre, album).text = str(0)\n\n tree = ET.ElementTree(xml_state)\n tree.write(\"country_albums.xml\")\n 
return xml_state", "def create_tags(tag_dict, o_tree):\n for i, o in tag_dict.items():\n subtag1 = o_tree.find(o[0])\n subtag2 = etree.Element(i)\n subtag1.addnext(subtag2)\n o_tree.write(f'{output_path}/ppt/presentation.xml', pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n return", "def buildxml2(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml2()\")\n # on master, control network bridge added earlier in startup()\n ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False)\n self.buildplatformxml2(ctrlnet)\n self.buildnemxml()\n self.buildeventservicexml()", "def _get_ome_xml_root(self) -> ElementTree:\n ome_metadata_element = ElementTree.fromstring(self._ome_metadata)\n tree = ElementTree.ElementTree(ome_metadata_element)\n return tree.getroot()", "def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = 
self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder", "def ToXMLElement(self):\n table_element = xml.etree.ElementTree.Element('table')\n table_element.set('id', self.table_id)\n\n for column in self.columns:\n table_element.append(column.ToXMLElement())\n\n table_data = xml.etree.ElementTree.Element('data')\n table_data_file = xml.etree.ElementTree.Element('file')\n table_data_file.set('encoding', 'utf-8')\n table_data_file.set('format', 'csv')\n table_data_file.text = self.file_name\n\n table_data.append(table_data_file)\n\n table_element.append(table_data)\n\n return table_element", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace('\\n', os.linesep))", "def build_manifest(self, root):\n manifest = ET.SubElement(root, \"manifest\")\n for sid, href, media_type in self.manifest:\n args = {\"id\": sid, \"href\": href, \"media-type\": media_type}\n ET.SubElement(manifest, \"item\", **args) # pylint: disable-msg=W0142", "def gen_wtml(base_dir, depth, **kwargs):\n kwargs.setdefault('FolderName', 'Toasty')\n kwargs.setdefault('BandPass', 'Visible')\n kwargs.setdefault('Name', 'Toasty map')\n kwargs.setdefault('Credits', 'Toasty')\n kwargs.setdefault('CreditsUrl', 'http://github.com/ChrisBeaumont/toasty')\n kwargs.setdefault('ThumbnailUrl', '')\n kwargs['url'] = base_dir\n kwargs['depth'] = depth\n\n template = ('<Folder Name=\"{FolderName}\">\\n'\n '<ImageSet Generic=\"False\" DataSetType=\"Sky\" '\n 'BandPass=\"{BandPass}\" Name=\"{Name}\" '\n 'Url=\"{url}/{{1}}/{{3}}/{{3}}_{{2}}.png\" BaseTileLevel=\"0\" '\n 'TileLevels=\"{depth}\" BaseDegreesPerTile=\"180\" '\n 'FileType=\".png\" BottomsUp=\"False\" Projection=\"Toast\" '\n 'QuadTreeMap=\"\" CenterX=\"0\" CenterY=\"0\" OffsetX=\"0\" '\n 'OffsetY=\"0\" Rotation=\"0\" Sparse=\"False\" '\n 'ElevationModel=\"False\">\\n'\n '<Credits> {Credits} </Credits>\\n'\n '<CreditsUrl>{CreditsUrl}</CreditsUrl>\\n'\n '<ThumbnailUrl>{ThumbnailUrl}</ThumbnailUrl>\\n'\n '<Description/>\\n</ImageSet>\\n</Folder>')\n return template.format(**kwargs)" ]
[ "0.6970181", "0.63764966", "0.6287799", "0.6225016", "0.6220165", "0.6217637", "0.6206258", "0.619495", "0.6160407", "0.6134908", "0.61195517", "0.6091069", "0.6088098", "0.60488194", "0.59819216", "0.59459764", "0.59355354", "0.5920509", "0.5884173", "0.5878814", "0.5870564", "0.5760805", "0.57490104", "0.5732845", "0.57127476", "0.57099485", "0.5706973", "0.56900024", "0.56886786", "0.5688089", "0.5685507", "0.56803477", "0.5677781", "0.5668971", "0.56644845", "0.5657979", "0.56472504", "0.5623326", "0.56051993", "0.5590867", "0.5565177", "0.55559593", "0.5549441", "0.5542936", "0.55424607", "0.55422115", "0.5537468", "0.55328953", "0.55295765", "0.55207163", "0.5518952", "0.5514079", "0.5509311", "0.5478515", "0.5472179", "0.5470781", "0.5453695", "0.5441356", "0.54386735", "0.543401", "0.5430775", "0.54270643", "0.5417141", "0.54063904", "0.54009885", "0.5395149", "0.5390411", "0.5385372", "0.5381315", "0.5379351", "0.53743756", "0.53670347", "0.53646237", "0.5352926", "0.5352577", "0.53438675", "0.5337794", "0.53306067", "0.5327577", "0.531757", "0.53134364", "0.53076184", "0.53076184", "0.53019947", "0.5295505", "0.5291731", "0.5287998", "0.52873254", "0.52873117", "0.52780455", "0.52697754", "0.52658945", "0.52617615", "0.5257348", "0.525327", "0.52525884", "0.5251969", "0.5241471", "0.5235605", "0.5227018" ]
0.75603426
0
Creates a folder and saves an XML tree to a specific path
def save_xml(tree, file_name, folder_name):
    import os
    # change working directory to the target folder
    os.chdir(folder_name)
    tree.write(file_name)  # the name of the new file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mkdir(path):", "def create_folder(path):\n command = ['mkdir', TEST_DIR]\n file_operation(path, command)", "def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)", "def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)", "def create_folder(path):\n if not exists(path):\n os.makedirs(path)", "def create_folder(path):\n try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path", "def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)", "def createFolder(self):\n raise NotImplementedError", "def create_folder(path: str):\n if not os.path.exists(path):\n os.makedirs(path)", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def create_path(self, path):\n path_list = path.split(\"/\")\n done_path = self.parent_folder + \"/\"\n\n for directory in path_list:\n try:\n os.mkdir(done_path + directory + \"/\")\n except FileExistsError:\n done_path += directory + \"/\"", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def create_directories(self, path):\n os.makedirs(path)\n print('Directory created at:', path)\n return path", "def mkdir(self, path):\n os.mkdir(path)", "def make_dir(self, path):\n import os\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(self, path):\n try:\n postdata = codecs.encode(json.dumps({ 'dir': path }), 'utf-8')\n self._urlopen('/api/fileops/mkdir', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to create '{}'\".format(path))", "def createFolder(folder):\n folder_ = os.path.join(os.getcwd(),folder)\n if not(os.path.isdir(folder_)):\n os.mkdir(folder_)", "def create_folder(self, c_path):\n raise NotImplementedError", "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n\n # Simple use of make dir function\n os.mkdir(abspath(path))", "def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)", "def create_new_dir(path):\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called from save_single_file_locally', extra=d)\n\n if not os.path.exists(path):\n logger.debug('Calling Function: % s',\n 'create_new_dir: create_new_dir calling makedirs', extra=d)\n os.makedirs(path)\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called makedirs', extra=d)", "def createFolder(self):\n\n self.directory = \"D:\\\\CompositionHelper\"\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n print ('Created new folder')", "def create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def createDir(self, dir_name):\n 
os.mkdir(os.path.join(self.user[\"Save\"], dir_name))", "def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass", "def createFolder(self, path):\n yield \"\\n\" # to avoid timeout\n log(\"creating folder\")\n if \"flix\" in path:\n yield \"%s\" % self.fileService.createFolder(path)\n return\n yield 0\n return", "def mkdir_p(cls, path):\n os.makedirs(path)", "def MakeDir(self, path: str) -> None:\n ...", "def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)", "def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")", "def mkpath(self, _path):\n self.mkdir(path.dirname(_path))", "def insert_graph_folder(path_mode):\n os.mkdir(path_mode + 'allegati/ck_points')", "def Create_my_dir(new_path): #Create paths (windows os)\r\n if not os.path.exists(new_path):\r\n os.makedirs(new_path);", "def gen_dir(layer, teste, root_folder):\r\n lyr = project.mapLayersByName(layer)[0]\r\n selection = get_lst_reprovado(lyr, teste)\r\n features = lyr.getFeatures()\r\n root_path = 'C:/' + root_folder + '_shp_result'\r\n\r\n os.mkdir(root_path)\r\n \r\n for feat in features:\r\n lote = feat['lote']\r\n municipio = feat['municipio']\r\n cod_emp = feat['cod_emp']\r\n lote_path = root_path + '/' + lote\r\n mun_path = lote_path + '/' + municipio\r\n imovel_path = mun_path + '/' + cod_emp\r\n \r\n if cod_emp in selection:\r\n if not os.path.exists(lote_path):\r\n os.makedirs(lote_path)\r\n \r\n if not os.path.exists(mun_path):\r\n os.makedirs(mun_path)\r\n \r\n os.mkdir(imovel_path)\r\n os.mkdir(imovel_path + '/shapefiles')", "def create_directory_structure(root):\n berlin = os.path.join(root, \"Berlin\",\"Berlin_test\")\n istanbul = os.path.join(root, \"Istanbul\",\"Istanbul_test\")\n moscow = os.path.join(root, \"Moscow\", \"Moscow_test\")\n try:\n os.makedirs(berlin)\n os.makedirs(istanbul)\n os.makedirs(moscow)\n except OSError:\n print(\"failed to create directory structure\")\n sys.exit(2)", "def create_folder(self, path: str, name: str, created_by: str = \"fusillade\") -> None:\n schema_facets = [dict(SchemaArn=self.schema, FacetName=\"NodeFacet\")]\n object_attribute_list = self.get_object_attribute_list(facet=\"NodeFacet\", name=name, created_by=created_by)\n try:\n cd_client.create_object(DirectoryArn=self._dir_arn,\n SchemaFacets=schema_facets,\n ObjectAttributeList=object_attribute_list,\n ParentReference=dict(Selector=path),\n LinkName=name)\n logger.info({\"message\": \"creating folder\", \"name\": name, \"path\": path})\n except cd_client.exceptions.LinkNameAlreadyInUseException:\n pass", "def mkdir(path):\n\tif not Path(path).exists():\n\t\tPath(path).mkdir(parents=True, exist_ok=True)", "def create_directory_structure(path_main):\n\n if not path_main.exists():\n path_main.mkdir(parents=True)", "def make_new_dir(path):\n\n if(not(os.path.isdir(path))):\n os.makedirs(path)\n\n return path", "def new_dir(folder):\n os.makedirs(folder, exist_ok=True)\n return folder", "def create_new(self, root, name_length):\n self.name = create_random_string(name_length)\n self.ctime = datetime.datetime.now()\n date_time = datetime.datetime.strftime(self.ctime, \"%Y%m%d_%H%M%S\")\n self.folder = f\"{date_time}_{self.name}\"\n self.path = os.path.join(root, 
self.folder)\n try:\n os.makedirs(self.path)\n print(f\"Created folder {self.folder}\")\n except OSError:\n print(f\"Directory {self.folder} already exists\")\n except:\n print(f\"Cannot create folder: {self.folder}\")\n raise", "def create_folder(path: str):\n try:\n Path(path).mkdir(parents=True, exist_ok=True)\n return True\n except:\n print(\"An error occured.\")", "def create_directory(path, name):\n new_path = os.path.join(path, name)\n if not os.path.isdir(new_path):\n subprocess.run(['mkdir', new_path])", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def create_folder(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def create_dir_structure():\n LOG.info('In create_dir_structure')\n OutputWrite.change_to_script_directory(__file__)\n path = os.path.abspath(os.path.join('..', 'results',\n global_constants.TEXT_BOARD,\n global_constants.TEXT_INTERFACE,\n global_constants.TEXT_DEVICE,\n global_constants.TEST_EXECUTION_NAME\n ))\n LOG.debug('Path to be Created = {0}'.format(path))\n os.makedirs(path, exist_ok=True, mode=0o755)\n for item in global_constants.TEST_CASE_LIST_NAMES:\n in_path = os.path.exists(os.path.join(path, item))\n if not os.path.exists(in_path):\n LOG.debug('Path with Test Case name = {0}'.format(in_path))\n os.mkdir(in_path)\n LOG.debug('Path = {0}'.format(path))\n return path", "def save(self):\n try:\n return pmc.system.saveAs(self.path)\n except RuntimeError as err:\n log.warning(\"Missing directories in path. \"\n \"Creating directories now...\")\n self.folder_path.makedirs_p()\n return pmc.system.saveAs(self.path)", "def createPath(self, outPath):\n # Create new directory for output path\n try:\n os.mkdir(outPath)\n except OSError:\n print (\"Creation of the directory %s failed\" % outPath)\n else:\n print (\"Successfully created the directory %s \" % outPath)", "def create_tree(file, rep):\n try:\n if file is not None:\n rep = rep + '/' + file[0:4] + '/' + file[4:6] + '/' + file[6:8]\n if not exists(rep):\n makedirs(rep)\n move(file, rep)\n else:\n if not exists(rep + '/' + file):\n move(file, rep)\n else:\n print('Already exists!')\n except OSError:\n print('Argh! 
I could not create the directory!')", "def create_folder():\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")", "def create_tree(outFile, tree, path='/'):\n for key, foo in tree.list():\n if outFile.has_node(path, key):\n logging.debug('Path already found:', path, key)\n continue\n logging.debug('Creating group:', path, key)\n outFile.create_group(path, key, key)\n dest = path + key + '/'\n if outFile.has_node(dest):\n continue\n create_tree(outFile, tree.child(key), dest)", "def create_data_dir(path: Path):\n if not path.is_dir():\n path.mkdir(parents=True)", "def create_project_folder(self):\n\t\tif not os.path.exists(self.segment_path):\n\t\t\tfileutil.makedirs(self.segment_path)", "def create(self, basedir, outdir, name, prefix=None):", "def mkdir(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def prep_folder(args):\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)", "def createFolder(self):\n folderName, ok = QtWidgets.QInputDialog.getText(self, 'Folder Name', 'Enter the folder name :',\n QtWidgets.QLineEdit.Normal)\n\n if ok:\n parent = self.fileDir\n currentPath = self.dataDir\n if self.fileDir.selectedItems():\n parent = self.fileDir.selectedItems()[-1]\n currentPath = str(parent.toolTip(0))\n\n if not os.path.isdir('%s/%s' % (currentPath, str(folderName))):\n item = QtWidgets.QTreeWidgetItem(parent)\n\n item.setText(0, str(folderName))\n item.setToolTip(0, '%s/%s' % (currentPath, str(folderName)))\n\n # connect icon\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap('%s/folder.png' % (self.iconsDir)), QtGui.QIcon.Normal,\n QtGui.QIcon.Off)\n item.setIcon(0, icon)\n\n # be careful about shiboken2, you can use 'is' and 'is not' instead of using operator '==' and '!='\n if parent is not self.fileDir:\n self.fileDir.setItemExpanded(parent, True)\n self.fileDir.setItemSelected(parent, False)\n\n self.fileDir.setItemSelected(item, True)\n\n os.makedirs('%s/%s' % (currentPath, str(folderName)))", "def create_tree(path, depth=DEPTH):\r\n os.mkdir(path)\r\n for i in range(NUM_FILES):\r\n filename = os.path.join(path, 'file{0:03}.txt'.format(i))\r\n with open(filename, 'wb') as f:\r\n f.write(b'foo')\r\n if depth <= 1:\r\n return\r\n for i in range(NUM_DIRS):\r\n dirname = os.path.join(path, 'dir{0:03}'.format(i))\r\n create_tree(dirname, depth - 1)", "def make_path(self):\n folders = [\n f\"{self.save_path}{self.name}/json/\",\n f\"{self.save_path}{self.name}/images/\",\n ]\n if hasattr(self, \"masks\"):\n folders.append(f\"{self.save_path}{self.name}/masks/\")\n for folder in folders:\n if not os.path.exists(folder):\n os.makedirs(folder)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. 
Creating dir.\".format(path))\n os.mkdir(path)", "def create_directory(dir_path):\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path, exist_ok=True)", "def fs_create_dir(self, path):\n\t\treturn Job(SDK.PrlSrv_FsCreateDir(self.handle, path)[0])", "def mkfile(path):\n if os.path.exists(path):\n print(\"{} already exists.\".format(path))\n else:\n try:\n parent = os.path.abspath(os.path.join(path, os.pardir))\n os.makedirs(parent, exist_ok=True)\n open(path, 'a').close()\n except OSError:\n print(\"Uh oh - something went awry!\")\n else:\n print(\"Successfully created {}\".format(path))", "def mkdir(path: str):\n _fs().mkdir(path)", "def create_dir(dir_path):\n validate.check_python_ver(ver=3.5)\n from pathlib import Path\n Path(dir_path).mkdir(parents=True, exist_ok=True)", "def create_folders(folder_name):\n\n if os.path.exists(downloads_path + '\\\\' + folder_name):\n pass\n else:\n os.makedirs(folder_name)\n print(f'Folder: {folder_name} has been created in {downloads_path}')", "def create_folder(self, req, folder_path, new_folder_name):\n\t\tdirectory_path = os.path.join(self.get_selected_root(req), folder_path)\n\t\t\n\t\t#prevent shenanigans\n\t\tnew_folder_name = new_folder_name.split('/').pop()\n\t\t\n\t\tnew_path = os.path.join(directory_path, new_folder_name)\n\t\tif(os.access(new_path, os.F_OK)):\n\t\t\tcontent = tags.Tag('Error')(number=FLD_EXISTS)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tos.mkdir(new_path)\n\t\t\t\tcontent = tags.Tag('Error')(number=SUCCESS)\n\t\t\texcept:\n\t\t\t\tcontent = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)\n\t\t\n\t\treturn content", "def _create_folder(file_path):\r\n file_base = os.path.dirname(file_path)\r\n if not os.path.exists(file_base):\r\n try:\r\n os.makedirs(file_base)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise", "def create_dir(dir_path):\n\n if not path.exists(dir_path):\n log('Creating directory: {0}'.format(dir_path))\n run(sh.mkdir, dir_path, p=True)", "def Create_Dir(self,txn,filename):\n opid = self.new_opid()\n xaction = CreateDir_Operation(os.path.join(self.home,filename),opid)\n self._add_operation(txn,xaction)", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)", "def create_directory(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(self, path):\r\n return self._call(\"-mkdir\", path)", "def mkdir_p(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def _createDir(self) -> None:\n try:\n name = self._editItem.text()\n path = self._currPath.joinpath(name)\n if self._editItemType == 'folder':\n path.mkdir()\n elif self._editItemType == 'file':\n if len(self._model.findItems(name)) > 1:\n # this method of creating file does not prevent duplicates, so exception is raised manually\n raise FileExistsError\n with path.open('w+', encoding='utf-8'):\n pass\n self._listDirectories()\n createdItem = self._model.findItems(name)\n index = self._model.indexFromItem(createdItem[0])\n self._mainFileView.scrollTo(index)\n self._mainFileView.setCurrentIndex(index)\n except FileExistsError:\n self._statusBar.showMessage('File/folder with that name already exists!', 3000)\n 
self._listDirectories()\n except PermissionError:\n self._statusBar.showMessage('File/folder with that name could not be created!', 3000)\n self._listDirectories()", "def mkdir(self, path):\n self.log.debug(\"Local mkdir: %s\", path)\n # FIXME: dont set mode here, fix unittest mkdtemp instead\n os.makedirs(path, mode=0o700, exist_ok=True)", "def create_file(self, name, content=u'', folder=None):\n if folder is None:\n folder = self.rootdir\n\n path = os.path.join(folder, name)\n\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n\n with open(path, 'w', encoding='utf-8') as fhd:\n fhd.write(content)\n\n return path", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def create_folder_path(folder_path):\n try:\n if os.path.exists(folder_path):\n shutil.rmtree(folder_path)\n os.makedirs(folder_path)\n except Exception:\n raise Error('Create {folder_path} exception'.format(folder_path))", "def mkdir(name_folder,path):\r\n folder_name = os.path.join(path,name_folder)\r\n if not os.path.exists(folder_name):\r\n os.mkdir(folder_name)\r\n else:\r\n raise NameError('path %s already exists'% path)", "def create_files(project_name, root_dir):\r\n root_dir = projectfolders.create_path(root_dir, project_name) #Modify the root\r\n \r\n write_setup(project_name, root_dir)\r\n write_inits(project_name, root_dir)\r\n write_tests(project_name, root_dir)", "def _mkdir(self, path):\r\n os.mkdir(path)\r\n self.addCleanup(shutil.rmtree, path)", "def mkdirs(path):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)", "def create_folder(path_folder, name_subfolder=None):\n if not name_subfolder:\n if not os.path.exists(path_folder):\n os.makedirs(path_folder)\n else:\n path_result_subolder = os.path.join(path_folder, name_subfolder)\n if not os.path.exists(path_result_subolder):\n os.makedirs(path_result_subolder)", "def createFolder(folderFullPath):\n os.makedirs(folderFullPath, exist_ok=True)", "def mkdir_p(path):\n\n if os.path.exists(path):\n return\n\n par = os.path.split(path)[0]\n if os.path.exists(par):\n os.mkdir(path)\n getLogger(__name__).debug('created directory: %s' % path)\n else:\n mkdir_p(par)\n os.mkdir(path)", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def make_folder(l: str) -> None:\n\n Path(l).mkdir(parents=True, exist_ok=True)\n\n return", "def create_folders():\n os.makedirs(GRID_DIR, exist_ok=True)", "def makes_path(pathfile):\n path = os.path.dirname(pathfile)\n\n try:\n exists = os.path.exists(path)\n if exists:\n lx.out(\n \"Tried creating folder \\\"\" + path + \"\\\", but it already exists.\")\n else:\n try:\n os.makedirs(path)\n lx.out(\"Created folder: \" + str(path))\n except OSError:\n if not os.path.isdir(path):\n raise\n\n except:\n lx.eval('layout.createOrClose EventLog \"Event Log_layout\" '\n 'title:@macros.layouts@EventLog@ width:600 height:600 persistent:true '\n 'open:true')\n lx.out(\"ERROR creating path for \" + path, sys.exc_info())", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in 
self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def _save_model(self, path):\n self.ov_model._model_exists_or_err()\n path = Path(path)\n path.mkdir(exist_ok=True)\n xml_path = path / self.status['xml_path']\n save(self.ov_model.ie_network, xml_path)" ]
[ "0.7024828", "0.700881", "0.6860256", "0.68011755", "0.6756072", "0.6738238", "0.66949385", "0.66890776", "0.6670779", "0.6649409", "0.6647468", "0.66387165", "0.66373086", "0.66274315", "0.66152924", "0.66047573", "0.6588086", "0.6582058", "0.656134", "0.65589786", "0.6516234", "0.65015745", "0.6493719", "0.6480533", "0.64678985", "0.64626414", "0.6458174", "0.6457365", "0.6452825", "0.6414028", "0.6372484", "0.6370228", "0.6347025", "0.6345939", "0.6338497", "0.6324108", "0.6291854", "0.62835765", "0.6276729", "0.62753785", "0.6275046", "0.6270509", "0.62613064", "0.6259942", "0.6258505", "0.6252082", "0.6248139", "0.6245077", "0.6221", "0.620353", "0.6200295", "0.6189694", "0.61884356", "0.61864054", "0.618562", "0.618034", "0.6161751", "0.61568505", "0.61562496", "0.6146393", "0.614009", "0.6136438", "0.6136197", "0.6135469", "0.61267996", "0.6125547", "0.6124874", "0.6123856", "0.61224586", "0.6121813", "0.6095937", "0.6092598", "0.6090721", "0.6090219", "0.60901695", "0.607847", "0.607847", "0.607847", "0.607847", "0.6077454", "0.60730875", "0.6072712", "0.60705996", "0.6070358", "0.60647595", "0.6048111", "0.6046724", "0.6046125", "0.60400003", "0.6037574", "0.6035458", "0.60314155", "0.603071", "0.6030112", "0.6029105", "0.602834", "0.60235", "0.6012969", "0.6012969", "0.6002472" ]
0.7450946
0
takes input and returns a string
def makeinputstring(variabel):
    if type(variabel) == int:
        return str(variabel)
    elif type(variabel) == float:
        return str(int(float(variabel)))
    else:
        return str(variabel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input_string():\n return input(\"Enter input string: \")", "def get_string(opt=\"encode\"):\n text = input(f\"Enter string to {opt}: \")\n return text", "def input_str() -> str:\n\tinput_string = str(input('Enter your string: '))\n\treturn input_string", "def input_str() -> str:\n\t\tinput_string = str(input('Enter your string: '))\n\t\treturn input_string", "def create_name() -> str:\r\n user_input = str(input(\"What is your name?\\n\"))\r\n return user_input", "def format_input(input):\n return f\"{input}> \"", "def get_string_input():\n string_input = input('Please enter string: ')\n return string_input", "def getstring(message = \"Enter a value: \"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\treturn raw_input(message)", "def pr_str(inp_string):\n if inp_string is None:\n return ''\n else:\n return str(inp_string)", "def inask(question: str) -> str:\n answer = input(question)\n return answer", "def construct_path(self, str_input):\n\n str_result = str_input[len('\\input{'):-1]\n str_result = self.publication_path + str_result\n\n #print '[i] constructed path {}'.format(str_result)\n\n if str_result[-3:] != 'tex':\n str_result = str_result + '.tex'\n return str_result", "def question_3(name: str) -> str:\n return \"My first name is\" + \" \" + name", "def get_name():\n return raw_input(\"What's your name? \")", "def get_name():\n print(\"\")\n name = input(\"Greetings, what do we call you?: \")\n return name", "def question_1(name: str) -> str:\n return \"Hello my name is\" + \" \" + name", "def get_name():\r\n name = input(\"What is the customer's name?: \")\r\n\r\n return name", "def prompt_str_input(prompt_name: str, get_user_input: GetInputFunc) -> str:\n try:\n return str(get_user_input(f\"type in {prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))", "def get_input(user_input):\n return input(user_input)", "def plain_text( argument ):\n return str( argument )", "def main():\n\tprint 'Introduce string: '\n\ts = raw_input()\n\treturn if_unique_chars_one(s)", "def get_input(message:str) -> str:\n response = \"\"\n try:\n response = input(message)\n except:\n pass \n return response", "def input(index: int = 0) -> str:\n return inputs()[index]", "def _get_string():\n result = sys.stdin.readline().rstrip('\\n')\n return result", "def obtain_filename():\n file_wanted = input(\"Filename? \")\n return file_wanted", "def get_search_string():\n return input(\"Enter search name, or phone number: \")", "def _input_path() -> str:\n path_string = input('Path:').strip()\n return path_string", "def requestInput(st):\n return input(st+\": \")", "def userInput(self, message):\n data = raw_input(\"%s\" % message)\n return str(data)", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def GetString(title, max_input, init_val = \"\"):\r\n return _hiew.HiewGate_GetString(title, max_input, init_val)", "def get_task_name():\n clear()\n task_name = input(\"Task Name: \")\n\n if len(task_name) == 0:\n input(\"Name must have at least one character.\")\n return get_task_name()\n else:\n return task_name", "def get_name():\n clear()\n name = input(\"Employee Name: \")\n\n if len(name) == 0:\n input(\"Name must have at least one character.\")\n return get_name()\n else:\n return name", "def get_input():\n choice = input(\"(F)ile or (S)tring? \").upper()\n while not choice or choice[0] not in ['F', 'S']:\n choice = input(\"Please enter either 'F' or 'S'. Again (F/S)? 
\").upper()\n if choice[0] == 'S':\n input_str = input(\"Enter the string to encrypt/decrypt: \")\n else:\n filename = input(\"Filename: \")\n input_str = read_from_file(filename)\n return input_str.upper()", "def get_input(prompt):\n # type: (str) -> str\n return raw_input(prompt)", "def get_user_string(message):\n while True:\n user_input = input('{}: '.format(message))\n # This is a bad way to check if the user input is not empty.\n # It will be True if the user enters spaces, tabs, etc.\n if user_input:\n return user_input\n else:\n print('You must enter something.')", "def string_from_interwebs(input_value):\n \n return escape(unquote(input_value))", "def file_path():\n file_name = input(\"Enter the file name:\")\n return file_name", "def __string(input_string, name=\"\", internal=False):\n if input_string is None:\n __ex(\"The %s is missing.\" % name, internal)\n if input_string == \"\":\n __ex(\"The %s must not be empty.\" % name, internal)", "def _get_input(question: str) -> str:\n print(question)\n sys.stdout.flush()\n user_input = sys.stdin.readline()\n user_input = user_input.strip()\n return user_input", "def question_5(first_name: str, last_name: str) -> str:\n return \"My full name is\" + \" \" + first_name + \" \" + last_name", "def text_input():\n return input(\">>>\")", "def get_name():\n return input(\"Enter Name: \").capitalize()", "def part2_test_input():\n return \"\"\"\"\"\"", "def next_line() -> str:\n return input().strip()", "def inputString(self):\n return self.__inputString", "def output_str(string:str) -> str:\n print(string)", "def get_name() -> str:", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def gets_user_name():\n name = raw_input(\"What's your name? 
\")\n return name", "def part1_test_input():\n return \"\"\"\"\"\"", "def random_str(inp):\n if isinstance(inp, int):\n n = inp\n elif isinstance(inp, str):\n n = len(inp)\n else:\n raise ValueError(\"Invalid input (must be str or int)\")\n chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(random.sample(chars, n))", "def ui_input_text() -> str:\n\ttext = input('enter your text ')\n\treturn text", "def _input_symbol() -> str:\n symbol = input('Symbol: ').strip().upper()\n if symbol == '':\n return ''\n else:\n return symbol", "def getString(self):\n print \"Enter String value:\",\n self.string = raw_input()", "def inp(text):\r\n input(text)", "def hello_name(s):\n to_return = \"Hello {}!\".format(s)\n return to_return", "def string_rotate() -> str:\n\n\tdef input_str() -> str:\n\t\t\"\"\" This function make input of string data\"\"\"\n\t\tinput_string = str(input('Enter your string: '))\n\t\treturn input_string\n\n\tdef input_len() -> int:\n\t\t\"\"\" This function make input of length rotation string\"\"\"\n\t\tinput_length = int(input('Enter your length rotation: '))\n\t\treturn input_length\n\n\tinput_string = input_str()\n\tinput_length = input_len()\n\t\n\tchange_str = ''\n\t\n\tif input_length > 0:\n\t\tchange_str = input_string[input_length:len(input_string)] + input_string[0:input_length]\n\telif input_length < 0:\n\t\tchange_str = input_string[input_length:] + input_string[:input_length]\n\telse:\n\t\tprint(\"Intput length = 0\")\n\n\treturn print(change_str)", "def get_user_text_input(self):\n\t\tuser_input = raw_input('You: ')\n\t\treturn user_input", "def get_encoded_msg():\n print(\"Enter text you would like to decode:\\n\")\n e_msg = input(\">\")\n return e_msg", "def get_input(binary=False):\n print(\"* Input *\")\n choice = _get_selection(\"(F)ile or (S)tring? \", \"FS\")\n if choice == 'S':\n text = input(\"Enter a string: \").strip().upper()\n while not text:\n text = input(\"Enter a string: \").strip().upper()\n if binary:\n return bytes(text, encoding='utf8')\n return text\n else:\n filename = get_filename()\n flags = 'r'\n if binary:\n flags += 'b'\n with open(filename, flags) as infile:\n return infile.read()", "def getUsername():\n return input(\"Username\")", "def _get_string(self):\n result = sys.stdin.readline().rstrip('\\n')\n return result", "def getName():\n\n tcflush(sys.stdin, TCIFLUSH)\n name = input(\" You say:\\n \")\n updateNameDatabase(name)\n return name", "def get_input():\n # prompt the user for input and return the input\n user_input = input(\"Please enter your message: \")\n return user_input", "def get_text(text_input):\r\n return text_input", "def input_name(inputname):\n\tpath= (os.path.abspath(inputname))\n\treturn (path.split(\"/\")[-1].split(\".\")[0])", "def random_string(input: list):\n num = rand.randint(0,len(input)-1)\n return input[num]", "def get_input(self):\n print(\" String --> \", end='')\n self.string = str(raw_input())\n print(\" Substring --> \", end='')\n self.substring = str(raw_input())\n return self.string, self.substring", "def get_user_input():\n\n # note: input forced to lower case. 
Case effects LD calculations\n string1 = input(\"Enter first word (source) [\" + str(_string1) + \"]: \").strip().lower()\n if not string1:\n string1 = _string1\n string2 = input(\"Enter second word (target) [\" + str(_string2) + \"]: \").strip().lower()\n if not string2:\n string2 = _string2\n debug = input(\"Enter level of output (0, 1, 2) [\" + str(_debug) + \"]: \").strip()\n if not debug:\n debug = _debug\n if int(debug) > 2:\n debug = 2\n if int(debug) < 0:\n debug = 0\n\n return string1, string2, int(debug),", "def QueryStr(cls, varName: str) -> str:\n\n global userInput\n\n try:\n userInput = input(\"{}: \".format(varName.capitalize()))\n\n # Raises a ValueError if userInput CAN be recast as integer.\n if userInput.isdigit():\n raise ValueError\n\n except ValueError:\n # Reprompt user for valid entry.\n print(\"\\nPlease enter a valid {}.\".format(varName))\n cls.QueryStr(varName)\n\n except Exception:\n print(\"\\nOops something is buggy\")\n\n return userInput", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def _input(str=''):\n print(str, end='', flush=True)\n return stdin.readline().rstrip('\\n')", "def input_(text: str):\n inputText = input(text + bcolors.OKBLUE)\n print(bcolors.ENDC, flush=True, end=\"\")\n return inputText", "def as_action_str(string: str) -> str:", "def ui_input() -> str:\n return input('Enter cards numbers(spaces needed): ')", "def question_2(name: str) -> str:\n return \"Hello my name is\" + \" \" + name.capitalize()", "def convert_to_str(input_string):\n\n if sys.version < '3':\n\n if isinstance(input_string, str) \\\n or isinstance(input_string, unicode): # pragma: no cover py3\n\n return input_string # pragma: no cover py3\n\n else:\n\n if isinstance(input_string, str): # pragma: no cover py3\n\n return input_string # pragma: no cover py3\n\n return str(input_string)", "def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')", "def get_phone_number(phone_number):\n return input(\"Enter Updated Phone Number for {phone}: \".format(phone=phone_number))", "def enterMessage ():\r\n a = input(\"Enter the message:\\n\")\r\n return a", "def get_processed_string(self, input_string):\n if input_string[:6] == '[sic]\"':\n return input_string[6: -1]\n else:\n return input_string.format(**self)", "def get_cli_string():\n return os.path.basename(sys.argv[0]) + \" \" + \" \".join(sys.argv[1:])", "def question_4(name: str) -> str:\n return \"My first name is\" + \" \" + name.capitalize()", "def get_input_from_player(text):\n return prompt.string(text)", "def _input(msg):\n if sys.version_info.major >= 3:\n ans = input(msg)\n elif sys.version_info.major == 2:\n ans = raw_input(msg)\n else:\n raise Exception(\"Unsupported python version. 
Please upgrade to python 2 or higher.\")\n\n return ans", "def input_desc():\r\n desc = input(\"Entrez ici les remarques générales du directeur du tournoi: \")\r\n return desc", "def get_manual_test_string():\n test_string = \"\"\n while test_string == \"\":\n test_string = input(\"String to test (type 'q' to exit): \")\n test_string = test_string.strip()\n\n if test_string == \"\":\n print (\"Error: You must provide some input for the system to reply.\")\n return test_string", "def input_(self) -> str:\n\n # Try to input through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n return IO.stdin()", "def question_12(one_string: str, two_string: str) -> str:\n return one_string + two_string", "def input(msg: str):\n ret = input(msg)\n return ret", "def user(name):\n return f\"Hello {name}!\"", "def get_user_text() -> str:\n validinput = False\n while not validinput:\n intext = input(\"Which of your most favorite quotes can Polly cook up for you?\")\n if len(intext) > POLLY_CHAR_LIMIT:\n print(\"You have entered in more text that Polly can support in one call.\")\n validinput = False\n else:\n validinput = True\n return intext", "def get_phone_number():\r\n phone_number = input(\"What is the customer's phone number?: \")\r\n\r\n return phone_number", "def get_input_file():\n\n filename = input('Input the file name to save data to: ') + '.csv'\n return filename", "def str(x) -> String:\n pass", "def input_cislo_policka():\n str_policka = input('\\nNa ktore policko chces umiestnit svoje \\'X\\'? Zadaj hodnotu 0 - 19: ')\n return str_policka", "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def get_problem_type():\n problem_type = input(\"What type of problem do you want?\")\n return problem_type", "def format_name(f_name, l_name): #docstring (documentation)\n if f_name == \"\" or l_name == \"\":\n return \"You didn't provide valid inputs.\"\n formated_f_name = f_name.title()\n formated_l_name = l_name.title()\n return f\"Result: {formated_f_name} {formated_l_name}\"" ]
[ "0.73874146", "0.7343399", "0.72803926", "0.7171811", "0.7066056", "0.6929175", "0.69019353", "0.6721488", "0.66247624", "0.65604264", "0.6521488", "0.6518847", "0.6444485", "0.6426503", "0.6425762", "0.63756746", "0.6344128", "0.6306453", "0.6288618", "0.6283329", "0.62710667", "0.62408", "0.6234201", "0.622494", "0.6209735", "0.61986476", "0.61560863", "0.6153649", "0.61531544", "0.61274916", "0.6126864", "0.6102809", "0.60857713", "0.607321", "0.60463506", "0.6042637", "0.60383886", "0.60324126", "0.60220194", "0.6000611", "0.5992177", "0.5988147", "0.59795046", "0.59655523", "0.59641", "0.59464884", "0.59274083", "0.59206176", "0.59189737", "0.59142905", "0.5901088", "0.5890008", "0.58836526", "0.5877432", "0.5871013", "0.5869674", "0.5840329", "0.58344406", "0.583417", "0.5830369", "0.58235574", "0.5817236", "0.5788914", "0.57850933", "0.5780109", "0.57684815", "0.57673", "0.57657295", "0.5758669", "0.57456636", "0.57455915", "0.57455915", "0.5745064", "0.5736686", "0.57337934", "0.5733244", "0.57225657", "0.57203126", "0.570259", "0.56730247", "0.5672775", "0.56700134", "0.56452537", "0.56396896", "0.5630277", "0.56277734", "0.56150526", "0.5610598", "0.5608944", "0.56056577", "0.560529", "0.5603649", "0.5603414", "0.5593375", "0.5584419", "0.558358", "0.5580959", "0.5575308", "0.5569884", "0.5564544" ]
0.63531923
16
This function loops through the Excel sheet and sorts out elements, names, and texts, and determines where in the tree they should be added.
def loop_through_col(steps, safecookie, b, file_name, var, list_error, list_of_project_info):
    col, k, j, g = 4, 1, 0, 0
    sheet = get_excel(exceldokument)
    row_for_commitname = 2  #nya
    #row_for_commitname = 1  #gamla
    while col < sheet.ncols:
        if sheet.cell_type(0, col) != 0:
            j += 1
        if sheet.cell_type(row_for_commitname, col) != 0:
            g += 1
        col += 1
    if j == 0 or g == 0:
        p = AddFileWithError(file_name, "4")
        list_error = p.add_el(list_error, p)
        return list_error
    col = 4
    while col < sheet.ncols:
        if sheet.cell_type(0, col) != 0:
            name = (sheet.cell_value(0, col))
            commit = Steps(name).addtoxml(k, j, steps, safecookie)
            commit_name = (sheet.cell_value(row_for_commitname, col))
            list_error = check_cell_error(b, col, sheet, list_error, file_name)
            if commit_name.lower() == "littra":
                commit_name = "littra"
            if k == 1 and sheet.cell_type(b, col) != 0:
                c_name = (sheet.cell_value(b, col))
                AddAttrToTree(commit, commit_name, makeinputstring(c_name)).addtoxml()
                for x in range(0, 5):
                    AddAttrToTree(commit, list_of_project_info[x], makeinputstring(var[x])).addtoxml()
            elif k != 1 and sheet.cell_type(b, col) != 0:
                c_name = (sheet.cell_value(b, col))
                AddAttrToTree(commit, commit_name, makeinputstring(c_name)).addtoxml()
            k += 1
            n = col + 1
            if sheet.cell_type(0, n) != 0:
                if sheet.cell_type(b, col) != 0:
                    commit_name = (sheet.cell_value(row_for_commitname, col))
                    c_name = (sheet.cell_value(b, col))
                    AddAttrToTree(commit, commit_name, makeinputstring(c_name)).addtoxml()
            elif sheet.cell_type(0, n) == 0:
                try:
                    while sheet.cell_type(0, n) == 0 and n - 1 < sheet.ncols:
                        if sheet.cell_type(b, n) != 0:
                            list_error = check_cell_error(b, n, sheet, list_error, file_name)
                            commit_name = (sheet.cell_value(row_for_commitname, n))
                            c_name = (sheet.cell_value(b, n))
                            AddAttrToTree(commit, commit_name, makeinputstring(c_name)).addtoxml()
                        n += 1
                except:
                    pass
        col += 1
    return list_error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_excel(self):\r\n try:\r\n self.check_point_1 = True\r\n self.filename = tkFileDialog.askopenfilename(initialdir=\"/\", title=\"Select file\",\r\n filetypes=((\"excel files\", \"*.xlsx\"), (\"all files\", \"*.*\")))\r\n\r\n book = open_workbook(self.filename)\r\n\r\n if self.filename == '' or self.filename.split('/')[-1] != 'Engr Student Data.xlsx':\r\n self.check_point_1 = False\r\n if self.check_point_1 == True:\r\n self.headings = ['ID', 'Name', 'Section', 'Dept', 'GPA', 'MP1 Grade:', 'MP2 Grade:', 'MP3 Grade:',\r\n 'MT Grade:', 'Final Grade:']\r\n book = open_workbook(self.filename)\r\n sheet = book.sheet_by_index(0)\r\n for row_index in range(1, sheet.nrows):\r\n index = 0\r\n traits = []\r\n for column_index in range(sheet.ncols):\r\n traits.append(sheet.cell(row_index, column_index).value)\r\n self.student[sheet.cell(row_index, 1).value] = traits\r\n index += 1\r\n used_list = []\r\n for key in self.student:\r\n used_list.append(key)\r\n self.sort_list = sorted(used_list)\r\n for index in range(len(self.sort_list) - 1, -1, -1):\r\n single = self.sort_list[index].split(' ')\r\n if len(single) == 3:\r\n app.tree.insert(\"\", 0, text=int(self.student[self.sort_list[index]][0]),\r\n values=(single[:2], single[2]))\r\n continue\r\n app.tree.insert(\"\", 0, text=int(self.student[self.sort_list[index]][0]),\r\n values=(single[0], single[1]))\r\n app.info.configure(text=\"INFO: File Loaded.\")\r\n # If the user loaded the file properly, 'Info' Label should shows the message: 'INFO: File Loaded.'\r\n\r\n except (IOError, XLRDError):\r\n app.info.configure(text=\"INFO: Loading Failed! Please Try Again.\", font=('','7'))\r\n # If the user did not load the file properly,'Info' Label shows the message: 'INFO: Loading Failed! Please Try Again.'\r", "def _from_tree_to_etree(self):\n categories = self.tree.get_children('')\n# messagebox.showwarning('_from_tree_to_etree', \\\n# 'categories={}'.format(categories))\n for category in categories:\n \n acategory = etree.SubElement(self.trout, self.tree.item(category)['text'])\n if category =='approved':\n acategory.set('tags', \"('approved',)\")\n elif category =='conflicts':\n acategory.set('tags', \"('conflicts',)\")\n elif category =='suggestions':\n acategory.set('tags', \"('suggestions',)\")\n elif category =='unknown':\n acategory.set('tags', \"('unknown',)\")\n elif category =='cldr':\n acategory.set('tags', \"('cldr',)\")\n else:\n messagebox.showerror('_from_tree_to_etree', \\\n 'unrecognised category >{}<'.format(category))\n return\n# acategory.text = self.tree.item(category)['text']\n sons = self.tree.get_children(category)\n# messagebox.showwarning('_from_tree_to_etree', \\\n# '{}, sons={}'.format(category, sons))\n for son in sons:\n ason = etree.SubElement(acategory, son)\n# ason.text = self.tree.item(son)['text']\n ason.set('values', '{}'.format(self.tree.item(son)['values']))\n ason.set('tags', '{}'.format(tuple(self.tree.item(son)['tags'])))\n grandsons = self.tree.get_children(son)\n for grandson in grandsons:\n agrandson = etree.SubElement(ason, grandson)\n agrandson.text = self.tree.item(grandson)['text']\n agrandson.set('values', \\\n '{}'.format(self.tree.item(grandson)['values']))\n agrandson.set('tags', \\\n '{}'.format(tuple(self.tree.item(grandson)['tags'])))\n# grandsons = self.tree.get_children(grandson)\n# messagebox.showwarning('','{}'.format(etree.tostring(self.trout, \\\n# encoding='unicode', \\\n# pretty_print=True)))\n# messagebox.showwarning('_from_tree_to_etree', \\\n# 'filled with {} categories'.\\\n# 
format([child.tag for child in self.trout]))\n return self.trout", "def updateRowHierarchy(self, i, j) :\n if (self.isEmpty(i,j) or str(self.source_cell.value).lower().strip() == 'id.') :\n # If the cell is empty, and a HierarchicalRowHeader, add the value of the row header above it.\n # If the cell above is not in the rowhierarchy, don't do anything.\n # If the cell is exactly 'id.', add the value of the row header above it. \n try :\n self.rowhierarchy[i][j] = self.rowhierarchy[i-1][j]\n self.log.debug(\"({},{}) Copied from above\\nRow hierarchy: {}\".format(i,j,self.rowhierarchy[i]))\n except :\n # REMOVED because of double slashes in uris\n # self.rowhierarchy[i][j] = self.source_cell.value\n self.log.debug(\"({},{}) Top row, added nothing\\nRow hierarchy: {}\".format(i,j,self.rowhierarchy[i]))\n elif str(self.source_cell.value).lower().startswith('id.') or str(self.source_cell.value).lower().startswith('id '):\n # If the cell starts with 'id.', add the value of the row above it, and append the rest of the cell's value.\n suffix = self.source_cell.value[3:] \n try : \n self.rowhierarchy[i][j] = self.rowhierarchy[i-1][j]+suffix\n self.log.debug(\"({},{}) Copied from above+suffix\\nRow hierarchy {}\".format(i,j,self.rowhierarchy[i]))\n except :\n self.rowhierarchy[i][j] = self.source_cell.value\n self.log.debug(\"({},{}) Top row, added value\\nRow hierarchy {}\".format(i,j,self.rowhierarchy[i]))\n elif not self.isEmpty(i,j) :\n self.rowhierarchy[i][j] = self.source_cell.value\n self.log.debug(\"({},{}) Added value\\nRow hierarchy {}\".format(i,j,self.rowhierarchy[i]))\n return self.rowhierarchy", "def _from_etree_to_tree(self, lang='en-US'):\n #clear existing tree\n# for i in self.tree.get_children():\n# self.tree.delete(i)\n self.tree.delete(*self.tree.get_children())\n #now insert old tree\n for category in self.trout:\n tagged = category.get('tags')\n if tagged is None:\n tagged = \"('{}',)\".format(category.tag)\n if tagged[-1] == ')':\n inserttext = tagged[2:3].upper() + tagged[3:tagged.find(')')-2]\n else:\n inserttext = tagged[1:2].upper() + tagged[2:-1]\n #messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(lang, inserttext))\n thiscategory = self.tree.insert('', 'end', iid=inserttext.lower(), values=['', ''], \\\n text=LOCALIZED_TEXT[lang][inserttext], tags=\"{}\".format(inserttext.lower()))\n for term in category:\n values = eval(term.get('values'))\n tags = term.get('tags')\n# messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(values, tags))\n thisterm = self.tree.insert(thiscategory, 'end')\n self.tree.item(thisterm, tags=term.get('tags'))\n self.tree.item(thisterm, text=term.text)\n self.tree.item(thisterm, values=[str(values[0]), str(values[1])])\n# tags=term.get('tags'))\n for rendering in term:\n thisrendering = self.tree.insert(thisterm, 'end', \\\n text=rendering.text, values=term.get('values'), \\\n tags=rendering.get('tags'))\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')\n self.tree.update() \n pass", "def process(workbook: Any, contents: list) -> None:\n worksheet_name = 'Storage Inventory'\n worksheet = workbook.get_sheet_by_name(worksheet_name)\n\n headers = list(concat([\n ['Hostname', 'Model', 'OS', 'Nodes'],\n get_parser_header(DEDUPE_TMPL)\n ]))\n RowTuple = namedtuple('RowTuple', 
headers)\n build_header(worksheet, headers)\n\n rows = []\n for content in contents:\n doc = xmltodict.parse(content)\n component_details = search_tag_value(doc, 'component_details')\n command_details = search_tag_value(doc, 'command_details')\n\n dedupe, nodes = [], 0 # type: (list, int)\n for entry in command_details:\n nodes_content = collected_data(\n entry, 'cmd', 'isi storagepool nodepools list')\n nodes = max(map(compose(int, itemgetter(0)),\n run_parser_over(\n nodes_content,\n NODES_TMPL))) if nodes_content else nodes\n\n dedupe_content = collected_data(entry, 'cmd', 'isi dedupe stats')\n dedupe = run_parser_over(\n dedupe_content, DEDUPE_TMPL) if dedupe_content else dedupe\n\n dedupe = dedupe if len(dedupe) > 1 else [['', '', '', '', '', '']]\n rows.append([\n component_details['hostname'],\n component_details['model'],\n component_details['os'], str(nodes), *dedupe[0]\n ])\n\n final_col, final_row = 0, 0\n for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):\n for col_n, col_value in \\\n enumerate(row_tuple._asdict().values(), ord('A')):\n cell = worksheet['{}{}'.format(chr(col_n), row_n)]\n cell.value = str.strip(col_value)\n style_value_cell(cell)\n set_cell_to_number(cell)\n final_col = col_n\n final_row = row_n\n\n sheet_process_output(\n worksheet,\n 'StorageInventoryTable',\n 'Storage Inventory',\n final_col,\n final_row)", "def auto_input(filename_location):\n ds = pd.read_excel(filename_location, header=None)\n collection = []\n\n # print(ds.columns)\n\n for i in range(len(ds.columns)):\n collection.append(ds.values.T[i].tolist())\n # print(ds.values.T[i].tolist())\n\n print(collection)\n sorted_collection = [[] for i in range(len(collection[0]))]\n\n for j in collection:\n for k in range(len(j)):\n pass", "def structure_by_package(mel):\n \"\"\"receives in a pandas dataframe\"\"\"\n string='K10024-'\n WP='00'\n l={}\n mel['Level 1','Level 2','Level 3','Level 4']=''\n mel['WP']=mel['Level'].str.replace('.','',regex=True) \n for i,row in mel.iterrows():\n print (WP)\n if (type(row['WP Activity/ Part No.']) is str) and (string in row['WP Activity/ Part No.']) :\n #new section starts:\n WP=row['WP Activity/ Part No.']\n l[row['Level']]=row['Equipment Description']\n \n mel.loc[i,'WP']=WP\n for key in l.keys():\n mel.loc[i,'Level ' +key]=l[key]\n \n mel.dropna(subset=['Delivery','WP'], inplace=True)\n \n mel['WP']=mel['WP'].str.replace('K10024-','',regex=False) \n mel['WP']=mel['WP'].str[:2]\n mel.drop(columns=['Level'],inplace=True) \n mel.to_excel('packages_MEL02.xlsx')\n return mel", "def read_excel(input_filename):\n with open_workbook(input_filename) as wobo:\n # reading Links sheet\n links_sheet = wobo.sheet_by_name('Links')\n nodes = {}\n for row in ALL_ROWS(links_sheet, start=5):\n try:\n nodes[row[0].value].to_node.append(row[1].value)\n except KeyError:\n nodes[row[0].value] = Node(row[0].value, [row[1].value])\n try:\n nodes[row[1].value].to_node.append(row[0].value)\n except KeyError:\n nodes[row[1].value] = Node(row[1].value, [row[0].value])\n\n nodes_sheet = wobo.sheet_by_name('Nodes')\n for row in ALL_ROWS(nodes_sheet, start=5):\n node = row[0].value\n eqpt = row[6].value\n try:\n if eqpt == 'ILA' and len(nodes[node].to_node) != 2:\n print(f'Inconsistancy ILA node with degree > 2: {node} ')\n exit()\n if eqpt == '' and len(nodes[node].to_node) == 2:\n nodes[node].eqpt = 'ILA'\n elif eqpt == '' and len(nodes[node].to_node) != 2:\n nodes[node].eqpt = 'ROADM'\n else:\n nodes[node].eqpt = eqpt\n except KeyError:\n print(f'inconsistancy between nodes 
and links sheet: {node} is not listed in links')\n exit()\n return nodes", "def parseSheet(self):\n self.log.info(\"Parsing {0} rows and {1} columns.\".format(self.rowns,self.colns))\n \n self.column_dimensions = {}\n self.property_dimensions = {}\n self.row_dimensions = {}\n self.rowhierarchy = {}\n\n # Get dictionary of annotations\n self.annotations = self.r_sheet.cell_note_map\n \n for i in range(0,self.rowns):\n self.rowhierarchy[i] = {}\n \n for j in range(0, self.colns):\n # Parse cell data\n self.source_cell = self.r_sheet.cell(i,j)\n self.source_cell_name = cellname(i,j)\n self.style = self.styles[self.source_cell].name\n self.cellType = self.getType(self.style)\n self.source_cell_qname = self.getQName(self.source_cell_name)\n \n self.log.debug(\"({},{}) {}/{}: \\\"{}\\\"\". format(i,j,self.cellType, self.source_cell_name, self.source_cell.value))\n\n # Try to parse ints to avoid ugly _0 URIs\n try:\n if int(self.source_cell.value) == self.source_cell.value:\n self.source_cell.value = int(self.source_cell.value)\n except ValueError:\n self.log.debug(\"(%s.%s) No parseable int\" % (i,j))\n\n \n # Parse annotation (if any)\n if self.config.get('annotations', 'enabled') == \"1\":\n if (i,j) in self.annotations:\n self.parseAnnotation(i, j)\n\n # Parse cell even if empty\n if self.cellType == 'Data':\n self.parseData(i, j)\n elif (self.cellType == 'HRowHeader') :\n self.updateRowHierarchy(i, j)\n elif self.cellType == 'ColHeader' :\n self.parseColHeader(i, j)\n elif self.cellType == 'RowProperty' :\n self.parseRowProperty(i, j)\n \n # If cell not empty, check for more types\n if not self.isEmpty(i,j) :\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],RDF.type,self.namespaces['tablink'][self.cellType]))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['cell'],Literal(self.source_cell_name)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['col'],Literal(colname(j))))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['row'],Literal(i+1)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname] isrow row\n if self.cellType == 'Title' :\n self.parseTitle(i, j)\n \n elif self.cellType == 'RowHeader' :\n self.parseRowHeader(i, j)\n \n elif self.cellType == 'HRowHeader' :\n self.parseHierarchicalRowHeader(i, j)\n \n elif self.cellType == 'RowLabel' :\n self.parseRowLabel(i, j)\n \n # Add additional information about the hierarchy of column headers\n for value in self.column_dimensions.values():\n for index in range(1, len(value)):\n uri_sub = self.getColHeaderValueURI(value[:index+1])\n uri_top = self.getColHeaderValueURI(value[:index])\n self.graph.add((uri_sub, self.namespaces['tablink']['subColHeaderOf'], uri_top))\n self.graph.add((uri_sub, self.namespaces['tablink']['depth'], Literal(index)))\n self.graph.add((uri_top, self.namespaces['tablink']['depth'], Literal(index-1)))\n \n self.log.info(\"Done parsing...\")", "def _finalize_cells(self):\n # Order by time (as path) and then drilldown dimension value (group)\n # The key[0] is a list of paths: time, another_drilldown\n\n order = lambda left, right: cmp(left[0], right[0])\n cells = self.time_cells.items()\n cells.sort(order)\n\n # compute the current datetime, convert to path\n current_time_path = time_to_path(\n pytz.timezone('UTC').localize(datetime.utcnow()).astimezone(self.browser.timezone).strftime(\"%Y-%m-%d %H:00:00\"), \n self.last_time_level, \n 
self.time_hierarchy)\n\n self.cells = []\n for key, cell in cells:\n # If we are aggregating at finer granularity than \"all\":\n time_key = key[0]\n if time_key:\n # if time_key ahead of current time path, discard\n if time_key > current_time_path:\n continue\n cell.update(zip(self.time_levels, time_key))\n\n # append the drilldown_on attribute ref\n if self.drilldown_on:\n cell[self.drilldown_on] = self.drilldown_on_value_func(key[1])\n\n self.cells.append(cell)", "def BuildTree(self):\n \n try:\n \n for ID in self.IDs:\n \n self.tree.delete(ID)\n \n except:\n \n pass\n \n \n self.IDs = []\n \n if self.Gathered == None:\n \n for col in self.Header:\n self.tree.heading(col,\n text=col.title(),\n command=lambda c=col: self.SortBy(self.tree, c, 0))\n \n # adjust the column's width to the header string\n self.tree.column(col,\n width=tkFont.Font().measure(col.title())+20)\n \n else:\n \n for col in self.Header:\n self.tree.heading(col,\n text=col.title(),\n command=lambda c=col: self.SortBy(self.tree, c, 0))\n \n # adjust the column's width to the header string\n self.tree.column(col,\n width=tkFont.Font().measure(col.title())+20)\n\n\n for item in self.Gathered:\n \n self.IDs.append(self.tree.insert('', 'end', values=item))", "def SortBy(self,tree, col, descending):\n \n # grab values to sort\n data = [(tree.set(child, col), child) for child in tree.get_children('')]\n \n # now sort the data in place\n data.sort(reverse=descending)\n \n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n \n # switch the heading so it will sort in the opposite direction\n tree.heading(col,command=lambda col=col: self.SortBy(tree, col,int(not descending)))", "def sort(self): \n \n for i in range(0,len(self.tasks)):\n if len(self.tasks[i].build_block_belong) == 3:\n self.tree.add_element(self.tasks[i].build_block_belong,i)", "def sortby(tree, col, descending): # 重新排序 <-- 文字版\n # grab values to sort\n data = [(tree.set(child, col), child) \\\n for child in tree.get_children('')]\n\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n # 數字的排法(但文字部分就無法排序)\n #data.sort(key=lambda data: int(data[0]), reverse=descending)\n\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, \\\n int(not descending)))", "def _collapse_all(self):\n# global approved, conflicts, suggestions, unknown, cldr\n self.tree.item('approved', open=False, \\\n values=[self._count_children('approved'), ''])\n for child in self.tree.get_children('approved'):\n self.tree.item(child, tags='approved')\n\n self.tree.item('conflicts', open=False, \\\n values=[self._count_children('conflicts'), ''])\n for child in self.tree.get_children('conflicts'):\n self.tree.item(child, tags='conflicts')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='conflicts',)\n \n self.tree.item('suggestions', open=False, \\\n values=[self._count_children('suggestions'), ''])\n for child in self.tree.get_children('suggestions'):\n self.tree.item(child, tags='suggestions')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='suggestions')\n\n self.tree.item('unknown', open=False, \\\n values=[self._count_children('unknown'), ''])\n for child in self.tree.get_children('unknown'):\n 
self.tree.item(child, tags='unknown')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='unknown')\n\n self.tree.item('cldr', open=False, \\\n values=[self._count_children('cldr'), ''])\n for child in self.tree.get_children('cldr'):\n self.tree.item(child, tags='cldr')\n self.tree.item(child, open=False)\n for granchild in self.tree.get_children(child):\n self.tree.item(granchild, tags='cldr')\n\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor 
value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def parse_pages():\n\n excel_filename = 'Result_' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.xlsx'\n workbook = xlsxwriter.Workbook(excel_filename)\n worksheet_all = workbook.add_worksheet()\n\n create_headers(worksheet_all, workbook)\n\n row = 1\n col = 0\n\n cell_format = workbook.add_format()\n cell__wrapped_format = workbook.add_format()\n cell__wrapped_format.set_text_wrap()\n site_url = 'http://medsalltheworld.com/'\n for full_filename in get_html_filenames():\n with open(full_filename, \"r\", encoding=\"utf-8\") as html_file:\n try:\n soup = BeautifulSoup(html_file.read(), \"lxml\")\n product_name_elements = soup.find_all(\"li\", class_=\"col-xs-6 col-md-4\")\n for elem in product_name_elements:\n name = elem.select('h3')[0].text.replace('®', '')\n elem_url = site_url + elem.select('h3')[0].find('a')['href']\n\n worksheet_all.write(row, col, name, cell_format)\n worksheet_all.write(row, col + 1, elem_url, cell_format)\n worksheet_all.write(row, col + 2, full_filename, cell_format)\n row += 1\n\n except AttributeError:\n print(full_filename)\n\n workbook.close()", "def nodes_data_excel_parser(excel_path,**kwargs):\n excel_parser_engine = kwargs.get(\"engine\",\"xlrd\")\n\n # Check if excel file exists\n if not excel_path or not os.path.isfile(excel_path):\n raise FileNotFoundError(\n \"Excel data file {} not found.\".format(excel_path)\n )\n\n xls = pd.ExcelFile(excel_path,engine=excel_parser_engine)\n\n try:\n # TODO for sheet in xls.sheet_names:\n # nodes_data[sheet] = xls.parse(sheet)\n nodes_data = {\n \"buses\": xls.parse(\"buses\").replace({np.nan:None}),\n \"commodity_sources\": xls.parse(\"commodity_sources\").replace({np.nan:None}),\n \"transformers\": xls.parse(\"transformers\").replace({np.nan:None}),\n \"transformers_chp\": xls.parse(\"transformers_chp\").replace({np.nan:None}),\n \"renewables\": xls.parse(\"renewables\").replace({np.nan:None}),\n \"demand\": xls.parse(\"demand\").replace({np.nan:None}),\n \"storages\": xls.parse(\"storages\").replace({np.nan:None}),\n \"powerlines\": xls.parse(\"powerlines\").replace({np.nan:None}),\n \"timeseries\": xls.parse(\"time_series\").replace({np.nan:None}),\n \"financial\":xls.parse(\"financial\").replace({np.nan:None})\n }\n except KeyError:\n err_msg = \"Excel file must contains: [buses, commodity_sources, transformers, renewables, demand, storages, powerlines, financial and timeseries].\\n\\\n The following sheets are found: {}\".format(xls.sheet_names)\n raise Exception(err_msg)\n\n # set datetime index\n nodes_data[\"timeseries\"].set_index(\"timestamp\", inplace=True)\n nodes_data[\"timeseries\"].index = pd.to_datetime(\n nodes_data[\"timeseries\"].index\n )\n\n logger.info(\"Data from Excel file {} imported in as nodes data.\".format(excel_path))\n\n return nodes_data", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def construct(self):\n self._content.sort(key=lambda x: (x.parent, x.index))\n i=0\n j=1\n while i<len(self._content):\n while j<len(self._content):\n if self._content[j].parent == self._content[i].index:\n self._content[i].children.append(self._content[j])\n j+=1\n else:\n break\n i+=1", "def 
fix_pmathml(xml):\r\n for k in xml:\r\n tag = gettag(k)\r\n if tag == 'mrow':\r\n if len(k) == 2:\r\n if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':\r\n idx = xml.index(k)\r\n xml.insert(idx, deepcopy(k[0]))\t # drop the <mrow> container\r\n xml.insert(idx + 1, deepcopy(k[1]))\r\n xml.remove(k)\r\n fix_pmathml(k)", "def auto_convert(self):\n nodes_converted = []\n for node_type in self.conversion_spec_sheet:\n print('searching for: %s' % node_type)\n found_nodes = self.list_nodes(node_type)\n print('found: %s nodes' % len(found_nodes))\n for node in found_nodes:\n new_node = self.convert(node)\n nodes_converted.append([node, new_node])\n\n return nodes_converted", "def generate_tree(csv_data: List[List[str]], order: List[str]) -> List[NodeList]:\n tree = []\n\n for row in csv_data:\n branch = generate_branch(row, order)\n if not branch:\n continue\n\n branch[0], root_result = level_exists(branch[0], tree)\n\n for i in range(len(branch) - 1):\n branch[i + 1], result = level_exists(branch[i + 1], branch[i][\"children\"])\n if not result:\n branch[i][\"children\"].append(branch[i + 1])\n\n if not root_result:\n tree.append(branch[0])\n return tree", "def _walk(self, level=0):\n l_dict = self.list_all()\n indent = level * \" \"\n for node in l_dict[\"nodes\"]:\n print(indent + \"node\", node)\n for group in l_dict[\"groups\"]:\n print(indent + \"group: \", group)\n with self.open(group) as hdf_group:\n hdf_group._walk(level=level + 1)", "def test_hierarchy_jumps(self):\n it = [\n \"[[Chapter]] Chapter I\",\n \"This is chapter I text\",\n \"[[Article]] Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Section', 'Sub-section', 'Article'],\n 'patterns': ['Chapter', 'Section', 'Sub-section', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n def identifier(x):\n reg = re.compile(r'\\[(\\d+\\_?(\\d+)?)[a-z]?\\]')\n return int(reg.search(x).groups(0)[0])\n\n reading_order = sorted(doc.graph.nodes(), key=identifier)\n\n expected = [\n \"ROOT [0]\",\n \"Chapter [1]\",\n \"Article [2]\",\n ]\n\n self.assertListEqual(reading_order, expected)", "def fillTreeView(self):\n #Parcours des sections (qui sont des ensembles)\n\n for section in self.getBindings():\n self.__treeB.insert(\"\", END,iid=section, text= section.capitalize(), open=True, tag=\"header\")\n for binding in self.getBindings()[section]:\n bd = self.getBindings()[section][binding]\n self.__listeItemTreeview.append(self.__treeB.insert(section, END,iid=section+binding, text=binding.capitalize(), value=(bd[\"description\"], \"; \".join(bd[\"bindings\"]))))\n\n\n self.__treeB.tag_configure(\"header\", font=\"arial 10 bold\") # à voir si on garde une stylisation comme ça", "def xle_head_table(folder):\r\n # open text file\r\n df = {}\r\n for infile in os.listdir(folder):\r\n\r\n # get the extension of the input file\r\n filename, filetype = os.path.splitext(folder + infile)\r\n basename = os.path.basename(folder + infile)\r\n if filetype == '.xle':\r\n # open text file\r\n with open(folder + infile, \"rb\") as f:\r\n d = xmltodict.parse(f, xml_attribs=True, encoding=\"ISO-8859-1\")\r\n # navigate through xml to the data\r\n data = list(d['Body_xle']['Instrument_info_data_header'].values()) + list(\r\n d['Body_xle']['Instrument_info'].values())\r\n cols = list(d['Body_xle']['Instrument_info_data_header'].keys()) + list(\r\n d['Body_xle']['Instrument_info'].keys())\r\n\r\n df[basename[:-4]] = pd.DataFrame(data=data, index=cols).T\r\n allwells = pd.concat(df)\r\n 
allwells.index = allwells.index.droplevel(1)\r\n allwells.index.name = 'filename'\r\n allwells['trans type'] = 'Solinst'\r\n allwells['fileroot'] = allwells.index\r\n allwells['full_filepath'] = allwells['fileroot'].apply(lambda x: folder + x + '.xle', 1)\r\n\r\n return allwells", "def sort(self):\n \n ct=[]\n rt=[]\n wr=[]\n # search for tags that aren't in the right position\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n if c.wa:\n if not self.wa:\n self.wa=[]\n self.wa.extend(c.wa)\n if c.ct:\n newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]\n map(self.contigs[i].ct.remove,newcts)\n ct.extend(newcts)\n for j in range(len(c.reads)):\n r = c.reads[j]\n if r.rt:\n newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].rt.remove,newrts)\n rt.extend(newrts)\n if r.wr:\n newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].wr.remove,newwrs)\n wr.extend(newwrs)\n # now sort them into their proper place\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n for ct_tag in ct:\n if ct_tag.name==c.name:\n if self.contigs[i].ct is None:\n self.contigs[i].ct=[]\n self.contigs[i].ct.append(ct_tag)\n if rt or wr:\n for j in range(len(c.reads)):\n r = c.reads[j]\n for rt_tag in rt:\n if rt_tag.name==r.rd.name:\n if self.contigs[i].reads[j].rt is None:\n self.contigs[i].reads[j].rt=[]\n self.contigs[i].reads[j].rt.append(rt_tag)\n for wr_tag in wr:\n if wr_tag.name==r.rd.name:\n if self.contigs[i].reads[j].wr is None:\n self.contigs[i].reads[j].wr=[]\n self.contigs[i].reads[j].wr.append(wr_tag)", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def buildnemxml(self):\n for n in sorted(self._objs.keys()):\n emanenode = self._objs[n]\n emanenode.buildnemxmlfiles(self)", "def write_cost_comparison(worksheet, workbook, nodes, row, level, money):\n # write the data, start from row\n while row < (len(nodes)+4):\n node = nodes[row-4]\n if node:\n currentlevel = int(node[1][-1]) -1\n # current level\n if currentlevel == level:\n indent = workbook.add_format()\n indent.set_indent(currentlevel)\n if 'red' in node[2]:\n indent = add_to_format(indent, {'font_color': 'red'}, 
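xle_head_table above depends on xmltodict turning the .xle header into nested dicts; the parse step in isolation (the XML here is a minimal stand-in, not a real Solinst header):

import xmltodict

doc = xmltodict.parse("<Body_xle><Instrument_info><Model>LT</Model></Instrument_info></Body_xle>")
# Nested dicts mirror the element hierarchy, so lookups chain by tag name.
print(doc["Body_xle"]["Instrument_info"]["Model"])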
workbook)\n worksheet.write(row, 0, node[0].Name, indent)\n worksheet.write(row, 1, node[0].Total, money)\n worksheet.write(row, 2, node[0].Ordered, money)\n worksheet.write(row, 3, node[0].Invoiced, money)\n row+=1\n # next level\n elif currentlevel > level:\n parentrow = row-1\n row = write_cost_comparison(worksheet, workbook, nodes, row, currentlevel, money)\n # write the parent subtotal\n worksheet.write_formula(parentrow, 1, '{=SUBTOTAL(9, B'+str(parentrow+2)+':B'+str(row)+'}', money)\n worksheet.write_formula(parentrow, 2, '{=SUBTOTAL(9, C'+str(parentrow+2)+':C'+str(row)+'}', money)\n worksheet.write_formula(parentrow, 3, '{=SUBTOTAL(9, D'+str(parentrow+2)+':D'+str(row)+'}', money)\n # previous level, break\n else:\n break\n return row", "def generate_branch(row: List[str], order: List[str]) -> NodeList:\n lvl_node = []\n\n for i in range(1, len(row), len(order)):\n lvl_values = row[i : i + len(order)]\n if all(lvl_values):\n node = dict(zip(order, lvl_values))\n node[\"children\"] = []\n lvl_node.append(node)\n\n return lvl_node", "def sortby(tree, col, descending):\n # grab values to sort\n data = [(tree.set(child, col), child) for child in tree.get_children('')]\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, int(not descending)))", "def iterate(elt, stack, lvl):\n global DICT, DTA\n row = [\"\" for i in xrange(len(FIELDS))]\n for k in elt.keys():\n if k in DICT.keys():\n row[DICT.get(k)] = elt.get(k)\n if elt.text != None:\n row[DICT.get(\"text\")] = elt.text\n row[DICT.get(\"tag\")] = elt.tag\n row[DICT.get(\"lvl\")] = lvl\n try:\n row[DICT.get(\"idx\")] = stack[1] # child of root element\n except (IndexError,) as e:\n row[DICT.get(\"idx\")] = \"root\"\n row[DICT.get(\"stack\")] = ';'.join([str(i) for i in stack])\n row = [str(i) for i in row]\n DTA.append(row)\n idx = -1 # cheating a little: idx = 0 \n lvl += 1\n for elt in elt:\n idx += 1\n iterate(elt, stack+[idx], lvl)", "def write_budget_data(worksheet, workbook, nodes, row, level, money, bold):\n # write the data, start from row\n while row < (len(nodes)+4):\n node = nodes[row-4]\n if node:\n currentlevel = int(node[1][-1]) -1\n # current level\n if currentlevel == level:\n # print budget group\n if node[0].type == 'BudgetGroup':\n budgettotal = add_to_format(money, {'bold': True}, workbook)\n nameformat = add_to_format(bold, {'indent': currentlevel}, workbook)\n worksheet.write(row, 0, node[0].Name, nameformat)\n worksheet.write(row, 4, node[0].Total, budgettotal)\n # print budget items\n else:\n indent = workbook.add_format()\n indent.set_indent(currentlevel)\n textcolor = workbook.add_format()\n bimoney = money\n if node[0].Variation:\n indent = add_to_format(indent, {'font_color': 'red'}, workbook)\n textcolor.set_font_color('red')\n bimoney = add_to_format(bimoney, {'font_color': 'red'}, workbook)\n worksheet.write(row, 0, node[0].Name, indent)\n worksheet.write(row, 1, node[0].Unit, textcolor)\n worksheet.write(row, 2, node[0].Quantity, textcolor)\n worksheet.write(row, 3, node[0].Rate, bimoney)\n worksheet.write_formula(row, 4, '{=C'+str(row+1)+'*D'+str(row+1)+'}', bimoney)\n row+=1\n # next level\n elif currentlevel > level:\n parentrow = row-1\n row = write_budget_data(worksheet, workbook, nodes, row, currentlevel, money, 
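A sketch of how a column-sort callback like sortby above is typically wired to the headings when the Treeview is first built; it assumes the sortby function from this block is in scope, and the column names are illustrative:

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=("name", "size"), show="headings")
for col in ("name", "size"):
    # First click sorts ascending; sortby re-binds itself to flip direction.
    tree.heading(col, text=col.title(), command=lambda c=col: sortby(tree, c, 0))
tree.pack(fill="both", expand=True)
root.mainloop()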
bold)\n # write the parent subtotal\n budgettotal = add_to_format(money, {'bold': True}, workbook)\n worksheet.write_formula(parentrow, 4, '{=SUBTOTAL(9, E'+str(parentrow+2)+':E'+str(row)+'}', budgettotal)\n # previous level, break\n else:\n break\n return row", "def insertLeaf(T,i):\r\n T.data.append(i) \r\n T.data.sort(key=lambda x: x.word)", "def process_files(geodata_name, inp_dict):\n input_paths = inp_dict[\".xls\"][:]\n try:\n data = geodata(geodata_name)\n except UnicodeDecodeError:\n showerror(\"Ошибка кодирования\", \"Файл данных должен быть закодирован в utf-8\")\n data = geodata(askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=[(\"Файл данных txt\", \".txt\")], title=\"Выберите файл данных txt\")[0])\n\n\n for book in input_paths:\n book_flag = False\n with open_workbook(book, formatting_info=True) as rb:\n header = False\n wb = copy(rb)\n for numb, sheet in enumerate(rb.sheets()):\n column = \"False\"\n for row in range(sheet.nrows):\n if column != \"False\":\n for data_row in data:\n if sheet.cell(row, column).value == data_row[0]:\n sheet_wb = wb.get_sheet(numb)\n sheet_wb.write(row, sheet.ncols, data_row[1])\n sheet_wb.write(row, sheet.ncols+1, data_row[2])\n break\n else:\n for col in range(sheet.ncols):\n for data_row in data:\n if sheet.cell(row, col).value == data_row[0]:\n column = col\n book_flag = True\n sheet_wb = wb.get_sheet(numb)\n sheet_wb.write(row, sheet.ncols, data_row[1])\n sheet_wb.write(row, sheet.ncols+1, data_row[2])\n if not header:\n header = True\n style_list = get_xlwt_style_list(rb)\n wb.get_sheet(numb).write(0, sheet.ncols, u\"Широта\", style=style_list[sheet.cell_xf_index(0, 0)])\n wb.get_sheet(numb).write(0, sheet.ncols+1, u\"Долгота\", style=style_list[sheet.cell_xf_index(0, 0)])\n break\n if book_flag:\n if not os.path.isdir(\"out\"):\n os.mkdir(\"out\")\n f_out = get_output_name(book)\n wb.save(f_out)\n inp_dict[\"del\"].append(f_out)\n inp_dict[\"out\"].append(f_out)\n return inp_dict", "def getHierarchies():", "def getHierarchies():", "def _parse_cells(self):\n self.cells_with_solutions = []\n self.cells_without_solutions = []\n for cell in self.original_cells:\n if is_test_cell(cell):\n self.tests.append(read_test(cell))\n else:\n self.cells_with_solutions.append(cell)\n self.cells_without_solutions.append(replace_cell_solutions(cell))", "def make_tree(self, l):\n\t\tfor el in l:\n\t\t\tself.insert(el)", "def add_extra_content(object_name, list):\n sheet_list = [\"sheet1\", \"sheet2\", \"sheet3\", \"sheet4\", \"sheet5\"]\n target_path = os.path.join(os.path.join(os.path.abspath('..'), 'results'), object_name)\n supplement_path = os.path.join(os.path.join(os.path.join(os.path.abspath('..'), 'results'),\n 'supplement'), object_name)\n save_path = os.path.join(os.path.join(os.path.join(os.path.abspath('..'),\n 'results'), 'save'), object_name)\n if os.path.exists(save_path):\n pass\n else:\n os.mkdir(save_path)\n\n for name in list:\n\n target_file_path = os.path.join(target_path, name)\n supplement_file_path = os.path.join(supplement_path, name)\n # 获得工作簿的对象\n target_work_book = xlrd.open_workbook(target_file_path)\n supplement_work_book = xlrd.open_workbook(supplement_file_path)\n # 遍历每一个sheet\n for sheet in sheet_list:\n target_temp_sheet = target_work_book.sheet_by_name(sheet)\n supplement_temp_sheet = supplement_work_book.sheet_by_name(sheet)\n # 获得行数\n rows_target = target_temp_sheet.nrows\n rows_supplement = target_temp_sheet.nrows\n # 获得列数\n cols = target_temp_sheet.ncols\n for row in range(1, rows_supplement):\n for 
col in range(0, cols):\n target_temp_sheet.write(rows_target + row - 1, col, supplement_temp_sheet.cell_value())\n\n temp_book = xlwt.Workbook(encoding='utf-8')", "def update_tree(tree, subtree_hierarchy):\n new_tree = subtree_hierarchy.copy()\n for bg_pop, row in subtree_hierarchy.iterrows():\n # Remove not showing pops from new_tree\n if row['To_show'] == 'no':\n new_tree = new_tree.drop(bg_pop)\n continue\n\n # Find Parent\n parent_to_show = row['Parent']\n # If bg_pop has no Parent, skip\n if parent_to_show == '':\n continue\n # If Parent not in subtree, skip\n if parent_to_show not in subtree_hierarchy.index:\n continue\n # If Parent has To_show = 'no', find Parent of Parent, etc.\n while subtree_hierarchy.at[parent_to_show, 'To_show'] == 'no':\n parent_to_show = subtree_hierarchy.at[parent_to_show, 'Parent']\n # Set Parent to show in new_tree\n new_tree.at[bg_pop, 'Parent'] = parent_to_show\n\n new_tree = new_tree.reset_index()[['index', 'BG_population', 'Parent', 'BG_label']]\n # For pairs ('BG_population', 'Parent') that has coords, add coords\n new_tree_pos = new_tree.merge(tree.reset_index(), how='left', on=['BG_population', 'Parent'])\n new_tree_pos = new_tree_pos[['index_x', 'BG_population', 'Parent', 'posX', 'posY', 'BG_label_x']] \\\n .rename(columns={'index_x': 'index', 'BG_label_x': 'BG_label'}) \\\n .fillna('')\n\n return new_tree_pos", "def test_assemble_xml_file(self):\n self.maxDiff = None\n\n fh = StringIO()\n worksheet = Worksheet()\n worksheet._set_filehandle(fh)\n worksheet.str_table = SharedStringTable()\n worksheet.select()\n cell_format1 = Format({\"xf_index\": 1})\n cell_format2 = Format({\"xf_index\": 2})\n\n worksheet.merge_range(\"B3:C3\", \"Foo\", cell_format1)\n worksheet.merge_range(\"A2:D2\", \"\", cell_format2)\n\n worksheet.select()\n worksheet._assemble_xml_file()\n\n exp = _xml_to_list(\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\">\n <dimension ref=\"A2:D3\"/>\n <sheetViews>\n <sheetView tabSelected=\"1\" workbookViewId=\"0\"/>\n </sheetViews>\n <sheetFormatPr defaultRowHeight=\"15\"/>\n <sheetData>\n <row r=\"2\" spans=\"1:4\">\n <c r=\"A2\" s=\"2\"/>\n <c r=\"B2\" s=\"2\"/>\n <c r=\"C2\" s=\"2\"/>\n <c r=\"D2\" s=\"2\"/>\n </row>\n <row r=\"3\" spans=\"1:4\">\n <c r=\"B3\" s=\"1\" t=\"s\">\n <v>0</v>\n </c>\n <c r=\"C3\" s=\"1\"/>\n </row>\n </sheetData>\n <mergeCells count=\"2\">\n <mergeCell ref=\"B3:C3\"/>\n <mergeCell ref=\"A2:D2\"/>\n </mergeCells>\n <pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.3\"/>\n </worksheet>\n \"\"\"\n )\n\n got = _xml_to_list(fh.getvalue())\n\n self.assertEqual(got, exp)", "def parseHierarchicalRowHeader(self, i, j) :\n \n # Use the rowhierarchy to create a unique qname for the cell's contents, \n # give the source_cell's original value as extra argument\n self.log.debug(\"Parsing HierarchicalRowHeader\")\n \n # Add all the values\n for (index, value) in self.rowhierarchy[i].items():\n prop = self.property_dimensions[index]\n self.row_dimensions.setdefault(i,{})\n self.row_dimensions[i][self.namespaces['scope'][prop]]= Literal(value)\n \n # Relate the hierarchical headers\n keys = self.rowhierarchy[i].keys()\n for i in range(len(keys)-1):\n prop_top = self.namespaces['scope'][self.property_dimensions[keys[i]]]\n prop_sub = self.namespaces['scope'][self.property_dimensions[keys[i+1]]]\n 
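The merged-cell behaviour exercised by test_assemble_xml_file above reduces to two XlsxWriter calls; a standalone version (the file name is a placeholder):

import xlsxwriter

workbook = xlsxwriter.Workbook("merged.xlsx")
worksheet = workbook.add_worksheet()
fmt = workbook.add_format({"bold": True})
# merge_range takes an A1-style range, the cell contents, and a format;
# the non-top-left cells of the range are written as blanks.
worksheet.merge_range("B3:C3", "Foo", fmt)
worksheet.merge_range("A2:D2", "", fmt)
workbook.close()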
self.graph.add((prop_sub, self.namespaces['tablink']['subPropertyOf'], prop_top))", "def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name = taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes", "def create_index_row_logical_model_with_supplements(record, directory):\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(\"<b></b>\", 'lxml')\n tr = soup.new_tag(\"tr\")\n td_supplement = soup.new_tag(\"td\")\n\n if \"European Supplement\" in record[\"supplement\"]:\n span_supplement = soup.new_tag(\"spam\")\n span_supplement['class'] = \"badge badge-secondary\"\n span_supplement.string = \"European Supplement\"\n td_supplement.insert(1,span_supplement)\n tr.insert(1,td_supplement)\n\n td_ic_name = soup.new_tag(\"td\")\n td_ic_name[\"data-order\"] = record[\"class name\"]\n if record[\"stereotype\"] != \"missing data\": #The record is a class\n filename = classname_to_filename(str(record['class name']))\n url = directory+filename\n text = record[\"class name\"]\n print(text)\n new_link = soup.new_tag(\"a\")\n new_link['href'] = url\n new_link['target'] = \"_blank\"\n new_link.string = text\n td_ic_name.insert(1,new_link)\n tr.insert(2,td_ic_name)\n else: #The record is a property\n td_ic_name.string = record[\"class name\"]\n tr.insert(2,td_ic_name)\n \n td_dc_name = soup.new_tag(\"td\")\n td_dc_name[\"data-order\"] = str(record[\"property name\"])\n if record[\"stereotype\"] == \"missing data\": #The record is a property\n filename = str(record['class name'])+\".html#\"+str(record['property name'])\n filename = filename.replace(\"/\", \"-\")\n filename = filename.replace(\"*\", \"-\")\n filename = filename.replace(\" \", \"\")\n filename = filename.replace(\"\\t\", \"\")\n filename = filename.replace(\"\\n\", \"\")\n url = directory+filename\n text = str(record[\"property name\"])\n print(text)\n new_link = soup.new_tag(\"a\")\n new_link['href'] = url\n new_link['target'] = \"_blank\"\n new_link.string = text\n td_dc_name.insert(1,new_link)\n tr.insert(3,td_dc_name)\n else: #The record is a class\n td_dc_name.string = \"-\"\n tr.insert(3,td_dc_name)\n\n if record[\"definition\"] != \"\":\n td_def = soup.new_tag(\"td\")\n td_def.string = str(record[\"definition\"])\n tr.insert(4,td_def)\n \n if record[\"type\"] != \"missing data\":\n td_def = soup.new_tag(\"td\")\n td_def.string = str(record[\"type\"])\n tr.insert(5,td_def)\n else:\n td_def = soup.new_tag(\"td\")\n 
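parseHierarchicalRowHeader above records the header hierarchy with rdflib's graph.add; the triple-adding idiom by itself (the namespace URI and property names are illustrative):

from rdflib import Graph, Literal, Namespace

g = Graph()
SCOPE = Namespace("http://example.org/scope/")
# A statement is just a (subject, predicate, object) tuple.
g.add((SCOPE["province"], SCOPE["subPropertyOf"], SCOPE["country"]))
g.add((SCOPE["province"], SCOPE["label"], Literal("Province")))
print(len(g))  # 2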
td_def.string = \"-\"\n tr.insert(5,td_def)\n \n return tr", "def parse_columns():\n # Fetch columns letters\n old_column_letter = get_old_numbers_column()\n new_columns_letters = get_new_numbers_columns()\n\n # Fetch old numbers\n old_column_cells = sheet[f'{old_column_letter}15':f'{old_column_letter}120']\n old_numbers_cells = []\n for ii in old_column_cells:\n if re.match('[0-9]{7}', str(ii[0].value)):\n old_numbers_cells.append(ii)\n print(old_numbers_cells)\n\n ##----- WORKING UNTIL HERE -----##\n \n # Fetch all new numbers (for each format)\n # Creates an array of arrays of cells ([[CellA, CellB], [CellC, CellD]])\n for ii in old_numbers_cells:\n new_numbers_cells_array = []\n for ij in new_columns_letters:\n new_columns_cells = [f'{ij}15', f'{ij}120']\n new_numbers_cells = []\n for jj in new_columns_cells:\n if re.search('[0-9]{7}'):\n new_numbers_cells.append(ii)\n new_numbers_cells_array.append(new_numbers_cells)\n\n # Combines all the cells :\n # Creates an array of tuples, easier to work with..\n work_tuples = []\n for ii in old_numbers_cells:\n # First we create an array..\n work_array = [int(ii.value)]\n # Then we fill it..\n for ij in new_numbers_cells_array:\n # Using the current position in the old cells array...\n work_array.append(ij[old_numbers_cells.index(ii)])\n # Finally, the array is parsed as a tuple and added to the list\n work_tuples.append(tuple(work_array))\n\n return work_tuples", "def test_hierarchy_element_sorting(self):\n hs = HierarchyScheme(None, (\"foo\", \"bar\"), \"foobar\")\n # Atom lists are used here to indicate the positions\n # that this element may have in the \"proper\" sorting.\n hs.add_hierarchy_element((\"Z\", 10), [13])\n hs.add_hierarchy_element((\"Z\", 5), [12])\n # Switch two values so the list isn't just reversed from the correct sorting\n hs.add_hierarchy_element((\"None\", \"10\"), [10])\n hs.add_hierarchy_element((\"Y\", 10), [11])\n hs.add_hierarchy_element((\"A\", 10), [8, 9])\n hs.add_hierarchy_element((\"A\", \"10\"), [8, 9])\n hs.add_hierarchy_element((\"A\", 1), [5, 6, 7])\n hs.add_hierarchy_element((\"A\", \"1\"), [5, 6, 7])\n hs.add_hierarchy_element((\"A\", \" 1\"), [5, 6, 7])\n hs.add_hierarchy_element((\"A\", \"None\"), [4])\n hs.add_hierarchy_element((\"A\", \" \"), [2, 3])\n hs.add_hierarchy_element((\"A\", \"\"), [2, 3])\n hs.add_hierarchy_element((\" \", \"10\"), [0, 1])\n hs.add_hierarchy_element((\"\", \"10\"), [0, 1])\n hs.sort_hierarchy_elements()\n\n # Compare the sorted hierarchyelements to their expected sortings\n # (assuming that the atom indices indicate valid positions for the resulting elements in the proper sorting)\n for idx, ele in enumerate(hs.hierarchy_elements):\n assert idx in ele.atom_indices", "def process(workbook: Any, contents: Iterable) -> None:\n worksheet_name = 'SAN Hosts'\n worksheet = workbook.get_sheet_by_name(worksheet_name)\n headers = get_parser_header(SYSTEM_NAME_TMPL)\n\n headers += [\n 'cluster_id', 'host_id', 'volume_id', 'map_id', 'creator', 'Hostname',\n 'cluster', 'fc_ports', 'type', 'iscsi_chap_name', 'perf_class'\n ]\n\n RowTuple = namedtuple('RowTuple', headers)\n build_header(worksheet, headers)\n headers = [\n 'cluster_id/@value', 'host_id/@value', 'volume_id/@value',\n 'creator/@value', 'name/@value', 'cluster/@value',\n 'fc_ports/@value', 'type/@value',\n 'iscsi_chap_name/@value', 'perf_class/@value'\n ]\n\n hosts_rows, lun_rows = [], [] # type: list\n for sys_content, all_content, host_content in contents:\n system_name = run_parser_over(sys_content, SYSTEM_NAME_TMPL)[0]\n 
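process above pushes worksheet rows through a namedtuple built from a runtime header list; the pattern on its own (headers and values are placeholders):

from collections import namedtuple

headers = ["system", "host", "volume"]
RowTuple = namedtuple("RowTuple", headers)
row = RowTuple._make(["sysA", "host1", "vol9"])  # build from an iterable
print(row._asdict())  # ordered mapping of header -> value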
all_map_content = '\\n'.join(all_content.split('\\n')[1:])\n host_content = '\\n'.join(host_content.split('\\n')[1:])\n\n doc_map = xmltodict.parse(all_map_content)\n map_details = search_tag_value(doc_map, 'map')\n maps = luns_occurrences(map_details, headers)\n lun_rows = [system_name + row for row in maps]\n\n doc_host = xmltodict.parse(host_content)\n host_details = search_tag_value(doc_host, 'host')\n flat_data_host = [flatten_dict(data) for data in host_details]\n hosts = ordered_jsons(flat_data_host,\n [headers[0], 'id/@value'] + headers[3:])\n\n hosts_rows += [system_name + host_row for host_row in hosts]\n\n no_cluster = filter(lambda x: x[1] == '-1', lun_rows)\n no_hosts = filter(lambda x: x[2] == '-1', lun_rows)\n clusters_hosts = filter(lambda x: x[2] != '-1' and x[1] != '-1', lun_rows)\n\n common_columns = (0, 2)\n rows_cluster = multiple_join(\n common_columns, [no_cluster, hosts_rows])\n\n common_columns = (0, 1)\n row_hosts = multiple_join(\n common_columns, [no_hosts, hosts_rows])\n\n common_columns = (0, 1, 2)\n row_all = multiple_join(\n common_columns, [clusters_hosts, hosts_rows])\n\n sub_rows = list(rows_cluster) + list(row_hosts) + list(row_all)\n rows = expand_rows(sub_rows, 3)\n\n final_col, final_row = 0, 0\n for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):\n for col_n, col_value in \\\n enumerate(row_tuple._asdict().values(), ord('A')):\n cell = worksheet['{}{}'.format(column_format(col_n), row_n)]\n cell.value = str.strip(col_value)\n style_value_cell(cell)\n if chr(col_n) != 'D':\n set_cell_to_number(cell)\n final_col = col_n\n final_row = row_n\n\n sheet_process_output(\n worksheet,\n 'SANHostsTable',\n 'SANHosts',\n final_col,\n final_row)", "def normalize_excelSheet(self, sheetname, conversion_dictionary):\n\n sheet = self.wb.sheet_by_name(sheetname)\n\n ami_data = []\n\n date_headers = [\"bibliographic.date\", \"technical.dateCreated\"]\n time_headers = [\"technical.durationHuman\"]\n\n #copy everything from the 3rd row to the last row with a filename\n for rownum in range(2, sheet.nrows):\n if sheet.cell(rownum, 0):\n ami_data.append(sheet.row_values(rownum))\n\n for i in range(0, sheet.ncols):\n #normalize header\n header_entry = self.get_headerEntryAsString(sheetname, i)\n ami_data[0][i] = self.normalize_headerEntry(\n header_entry,\n conversion_dictionary)\n\n #convert excel dates\n if ami_data[0][i] in date_headers:\n for j in range(3, sheet.nrows):\n if sheet.cell(j, i).ctype == 3:\n value = sheet.cell(j, i).value\n ami_data[j-2][i] = self.convert_excelDateTime(value, \"date\")\n\n #convert excel times\n if ami_data[0][i] in time_headers:\n for j in range(3, sheet.nrows):\n if sheet.cell(j, i).ctype == 3:\n value = sheet.cell(j, i).value\n ami_data[j-2][i] = self.convert_excelDateTime(value, \"time\")\n\n ami_df = self.normalize_values(ami_data)\n\n return ami_df", "def words_in_sorted_order(self):\n print 'Words in sorted order:'\n self.words_in_sorted_order_utils(self.root)", "def _parse(self, tree):\n date_el = self.get_etree().xpath(DATE_XP)[0]\n self.date = date_el.attrib['value']\n self.year, self.month, self.day = self.date.split('-')\n self.date_text = date_el.text\n\n def resolve_type(element):\n return element.attrib.get('type', '').lower().strip('. 
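A distilled version of the date handling in normalize_excelSheet above: xlrd stores dates as floats, so cells of ctype 3 must be converted with the workbook's datemode:

import datetime
import xlrd

def cell_to_python(book, sheet, row, col):
    cell = sheet.cell(row, col)
    if cell.ctype == xlrd.XL_CELL_DATE:  # ctype 3: an Excel date serial
        return datetime.datetime(*xlrd.xldate_as_tuple(cell.value, book.datemode))
    return cell.value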
')\n\n def index_entity(nodes, model, article):\n for n in nodes:\n m = model(n, article)\n if m.ok:\n db.session.add(m)\n\n def get_html(article):\n return html.tostring(tei.build(etree.Element('article'), article))\n\n root = self.get_etree()\n for section in root.xpath('//div1'):\n section_type = resolve_type(section)\n if not section_type:\n continue\n for subsection in section.xpath('./div2'):\n subsection_type = resolve_type(subsection)\n if not subsection_type:\n continue\n for article in subsection.xpath('./div3'):\n article_type = resolve_type(article)\n if article_type == 'ad-blank':\n continue\n a = Article(issue_id=self.id,\n date=self.date,\n section_type=section_type,\n subsection_type=subsection_type,\n article_type=article_type,\n xpath=root.getpath(article),\n content=get_html(article))\n db.session.add(a)\n db.session.flush()\n index_entity(article.xpath('.//persName'), PersName, a)\n index_entity(article.xpath('.//placeName'), PlaceName, a)\n index_entity(article.xpath('.//orgName'), OrgName, a)\n index_entity(article.xpath('.//rs'), RefString, a)", "def post_order(self, verbose=False):\n if not self.root:\n return []\n alist = []\n self.pre_order_helper(self.root, alist, verbose)\n return alist", "def run(self,filename):\n \n # read the History\n self.histobj = history.readTreeFromFilename(filename)\n\n # create the splitter widget\n self.splitter = QSplitter()\n self.splitter.resize(800,600)\n\n self.treeview = QTreeWidget(self.splitter)\n self.treeview.setRootIsDecorated(True)\n self.treeview.headerItem().setText(0,\"Name\")\n\n # now set up the tree\n self.rootnode = QTreeWidgetItem(self.treeview,[filename])\n self.rootnode.setExpanded(True)\n self.rootnode.metanode = (self.histobj.thismeta,filename)\n \n for parent in sorted(self.histobj.directparents.keys()):\n node = QTreeWidgetItem(self.rootnode,[parent])\n node.setExpanded(True)\n node.metanode = (self.histobj.directparents[parent],parent)\n self.alreadydisplayedkeys[parent] = node\n \n self.findParents(node,parent)\n \n # create a text area that can display HTML\n self.textarea = QTextEdit(self.splitter)\n self.textarea.setReadOnly(True)\n \n # set up the events\n QObject.connect(self.treeview,SIGNAL(\"currentItemChanged(QTreeWidgetItem *,QTreeWidgetItem *)\"),self.currentItemChanged)\n \n # set the root element\n self.treeview.setCurrentItem(self.rootnode)\n\n # show the app\n self.splitter.setWindowTitle(\"HistoryView\")\n self.splitter.show()\n self.exec_()", "def parse_treefile_general(treefile, get_questions=False):\n\n\n f = open(treefile, \"r\")\n file_data = f.readlines()\n f.close()\n \n file_data = [line.strip(\"\\n\") for line in file_data]\n data = [line for line in file_data if line[:2] != \"QS\"] ## strip qwuestions \n \n if get_questions:\n questions = [line for line in file_data if line[:2] == \"QS\"] \n questions = [line.replace(\"{\", \"\").replace(\"}\", \"\") for line in questions]\n questions = [line.strip(\" \") for line in questions]\n questions = [re.split(\"\\s+\", line) for line in questions]\n for line in questions:\n assert len(line) == 3,line # \"Line does not contain 3 items: %s\"%(\" \".join(line))\n questions = dict([(line[1], line[2]) for line in questions])\n\n data = \"\\n\".join(data)\n \n bracketed = re.findall(\"\\{[^\\}]*\\}\",data)\n \n #print bracketed\n #### bracketed should consist of name, tree, name, tree... 
-- sort it out\n if len(bracketed) % 2 != 0:\n print(\"bracketed should consist of name, tree, name, tree\")\n sys.exit(1)\n \n data = []\n i=1\n for item in bracketed:\n #print item\n if i%2!=0.0: ## if i odd\n name = item\n else:\n tree = item\n data.append((name,tree))\n i+=1\n\n def strip_quotes(x):\n x = string.strip(x, '\"') #(\"\\_|-\", \"\", x) \n return x \n \n def to_num(x):\n if x[0] == \"-\" or x[0] == \"0\":\n return int(math.fabs(int(x)))\n else:\n return strip_quotes(x)\n #print data\n names_trees = []\n for (name, treestring) in data:\n \n #### tree\n treestring = treestring.strip(\"{} \\n\")\n\n treestring = re.split(\"\\n\", treestring)\n treestring = [line.strip(\" \\n\") for line in treestring] \n treestring = [re.split(\"\\s+\", line) for line in treestring] \n\n tree = [(to_num(num), quest, to_num(left), to_num(right)) for (num, quest, left, right) in treestring]\n\n\n ### name\n treestring = name.strip(\"{} \\n\")\n\n names_trees.append((name, tree))\n \n ##print names_trees \n if get_questions:\n return names_trees, questions \n else:\n return names_trees", "def whichElementIsInTheLevel(self):\n\n listElements = list()\n\n #We read each line\n for row in self._get_grille_csv():\n \n #We read each cell of each line\n for cell in row:\n if cell in listElements or cell == \"\":\n pass\n \n else:\n listElements.append(cell)\n\n self.loadingLevelElements(listElements)", "def _fix_treetags(self, tree):\n for element in tree:\n element.tag = element.tag.split('}')[1]\n if len(element.getchildren()) > 0:\n self._fix_treetags(element)\n return tree", "def parse_xml(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n #baseInfo['folder'] = tree.find('folder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n if obj.find('score') is None:\r\n obj_struct['score']=\"\"\r\n else:\r\n obj_struct['score'] = obj.find('score').text\r\n if obj.find('region') is None:\r\n obj_struct['region']=\"\"\r\n else:\r\n obj_struct['region'] = obj.find('region').text\r\n if obj.find('imageptr') is None:\r\n obj_struct['imageptr']=\"\"\r\n else:\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n # obj_struct['score'] = obj.find('score').text\r\n # obj_struct['region'] = obj.find('region').text\r\n # obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects", "def create_test_sort_outline(self) -> None:\n p = self.c.p\n assert p == self.root_p\n assert p.h == 'root'\n 
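The ElementTree access pattern shared by parse_xml and _fix_treetags above, reduced to a few lines (the XML string is a toy document):

import xml.etree.ElementTree as ET

xml = "<annotation><filename>img.jpg</filename><object><name>car</name></object></annotation>"
root = ET.fromstring(xml)
print(root.find("filename").text)   # first matching child
for obj in root.findall("object"):  # all matching children
    print(obj.find("name").text)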
table = (\n 'child a',\n 'child z',\n 'child b',\n 'child w',\n )\n for h in table:\n child = p.insertAsLastChild()\n child.h = h", "def _sort_tree(self):\n \n self._children = sorted(self._children, key=lambda x : x.id_num)\n for c in self.children:\n if hasattr(c, '_sort_tree'):\n c._sort_tree()\n \n return", "def make_trees(self):\n self.trees = build_recursively_from_cells(self.cells, container=self)\n# self.trees = []\n# for cell in self.cells:\n# if cell.bpointer is None: # test whether cell is root\n# tree = Colony(container=self)\n# tree.add_cell_recursive(cell)\n# self.trees.append(tree)\n return", "def vacuum(self, nb):\n sheets = nb.root.xpath('./sheet')\n keepers = {}\n for sheet in sheets:\n # get all unique logids from the relevant elements in the sheet\n logids = set(sheet.xpath('.//@logid'))\n for logid in logids:\n # get the log\n log = nb.get_log(logid)\n # get all ipython-cell elements from ipython-blocks with this logid\n cells = sheet.xpath('.//ipython-block[@logid=\"%s\"]/ipython-cell' %\n logid)\n # get all ipython-figure elements with this logid\n figs = sheet.xpath('.//ipython-figure[@logid=\"%s\"]' % logid)\n logkeepers = keepers.get(logid, set())\n logkeepers.update((x.get('type'), x.get('number')) for x in cells)\n logkeepers.update(('figure', x.get('number')) for x in figs)\n keepers[logid] = logkeepers\n for logid in logids:\n log = nb.get_log(logid)\n logkeepers = keepers[logid]\n cells = list(log)\n for cell in cells:\n num = cell.get('number')\n for subcell in list(cell):\n if (subcell.tag, num) not in logkeepers:\n cell.remove(subcell)\n if len(cell) == 0:\n log.remove(cell)", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def _LAYOUT_XML(self,dfName):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n dctDfsLAYOUT=None\r\n\r\n if dfName not in 
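The postorder traversal that copyAndCleanTree above is built on, in isolation (assumes the ete3 package; the newick string is a toy tree):

from ete3 import Tree

t = Tree("((A:0.1,B:0.2):0.05,C:0.3);")
for n in t.traverse("postorder"):
    # Leaves come before their parents, so parents see processed children.
    print(n.name or "<internal>", n.dist)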
self.dataFrames.keys():\r\n logStrFinal=\"{0:s}{1:s} not in dataFrames.keys()!\".format(logStr,dfName)\r\n logger.debug(logStrFinal) \r\n raise XmError(logStrFinal) \r\n else:\r\n logger.debug(\"{0:s}{1:s} in dataFrames.keys().\".format(logStr,dfName)) \r\n\r\n if 'LAYOUT_XML' not in self.dataFrames[dfName].columns.tolist():\r\n logStrFinal=\"{0:s}column LAYOUT_XML not in dataFrame!\".format(logStr)\r\n logger.debug(logStrFinal) \r\n raise XmError(logStrFinal) \r\n else:\r\n logger.debug(\"{0:s}column LAYOUT_XML in dataFrame.\".format(logStr))\r\n \r\n try: \r\n dctDfsLAYOUTLst=[]\r\n for index, row in self.dataFrames[dfName].iterrows(): \r\n xmlBLOB=row['LAYOUT_XML']\r\n logger.debug(\"{:s}xmlBLOB={!s:s}\".format(logStr,xmlBLOB)) \r\n xmlBLOBInB=base64.b64decode(xmlBLOB)\r\n logger.debug(\"{:s}xmlBLOBInB={!s:s}\".format(logStr,xmlBLOBInB)) \r\n xmlBLOBInStr=xmlBLOBInB.decode('cp1252') \r\n logger.debug(\"{:s}xmlBLOBInStr={:s}\".format(logStr,re.sub('\\r\\n[ ]*','',xmlBLOBInStr))) \r\n root = ET.fromstring(xmlBLOBInStr)\r\n dctDfsLAYOUTLst.append(Xm._xmlRoot2Dfs(root))\r\n\r\n # TabellenTypen ermitteln\r\n tabTypes=set()\r\n for dct in dctDfsLAYOUTLst:\r\n tabTypes=tabTypes.union(dct.keys())\r\n\r\n dctDfsLAYOUT={}\r\n # ueber alle Tabellen\r\n for tabType in tabTypes: \r\n tabTypeTables=[]\r\n for dct in dctDfsLAYOUTLst:\r\n if tabType not in dct.keys():\r\n continue\r\n else:\r\n # Tabelle anhängen\r\n tabTypeTables.append(dct[tabType])\r\n # ... wenn nicht alle Tabellen dieselben Spalten haben ?! ...\r\n dctDfsLAYOUT[tabType]=pd.concat(tabTypeTables)\r\n \r\n except UnicodeDecodeError as e: \r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.')) \r\n return dctDfsLAYOUT", "def _list_link_to_parents(self, col_name=\"x\"):\n\n # col_name may only be x, y, or name\n assert col_name in ['x', 'y', 'name'], f\"Not recognized: {col_name}\"\n\n # Populate a list which will be output\n output_list = []\n\n # Iterate over each row\n for _, r in self.df.iterrows():\n\n # If there is no parent for this row\n if pd.isnull(r['parent']) or r['parent'] < 0:\n\n # Skip it\n continue\n\n # If there is a parent for this row\n else:\n\n # Add the item to the list\n output_list.append(r[col_name])\n\n # The value placed in between the node and its parent\n # depends on whether it is X or Y (or name)\n\n # Get the value for the parent\n parent_val = self.df.loc[r['parent'], col_name]\n\n # Moving along the x axis\n if col_name == \"x\":\n\n # The intermediate node has the X coordinate of the parent\n output_list.append(parent_val)\n\n # Moving along the y axis\n elif col_name == \"y\":\n\n # The intermediate node has the Y coordinate of the child\n output_list.append(r[col_name])\n\n # For the 'name' values\n elif col_name == \"name\":\n\n # The intermediate node has no name\n output_list.append(None)\n\n # Add its parent\n output_list.append(parent_val)\n\n # Add a NaN to separate it\n output_list.append(None)\n\n # Return the list\n return output_list", "def organize(self):\n for position in os.listdir():\n if os.path.isdir(position):\n temp_folder = Folder(position)\n self.folders.append(temp_folder)\n 
self._add_all_files(temp_folder)\n\n self._validate_extensions()", "def sort_rows(self) -> list:\r\n\r\n # Sort Inner rows\r\n done_rows = []\r\n\r\n # Sort each row independently\r\n for r in self.rows:\r\n \r\n # Insertion Sort by x coordinate\r\n r = r[::-1] # reverse the list first to speed up sorting\r\n # this is faster because the list is\r\n # already partly reverse sorted\r\n \r\n sorted_row = []\r\n \r\n while r != []:\r\n\r\n # Take element from row\r\n el = r.pop()\r\n\r\n # Find where it belongs\r\n curr = 0\r\n while curr < len(sorted_row) and \\\r\n el.get_center()[0] > sorted_row[curr].get_center()[0]:\r\n \r\n curr += 1\r\n\r\n # Insert it there\r\n sorted_row.insert(curr, el)\r\n\r\n # collect sorted rows\r\n done_rows.append(sorted_row)\r\n\r\n\r\n # Sort Inter rows\r\n finished = []\r\n \r\n while done_rows != []:\r\n\r\n # Take out a row\r\n el = done_rows.pop()\r\n\r\n # Find where it belongs\r\n curr = 0\r\n while curr < len(finished) and \\\r\n (finished[curr][0].get_center()[1]\r\n + finished[curr][len(finished[curr]) - 1].get_center()[1]) \\\r\n / 2 < (el[0].get_center()[1]\r\n + el[len(el) - 1].get_center()[1]) / 2:\r\n curr += 1\r\n\r\n # Insert it there\r\n finished.insert(curr, el)\r\n\r\n\r\n # Set the sorted rows\r\n self.rows = finished", "def _collect_tree(index: dict) -> Iterator[list]:\n yield [\"geo_id\", \"geo_parent_id\", \"geo_type\", \"name\", \"name_uk\"]\n # count up to nearest hundred frrom max id, and append added items from there\n offset = partial(offset_id, (max(index) // 100 + 1) * 100)\n order = tuple(geo.GeoMeta.registry)\n # sorted by decreasing area\n for key in sorted(index, key=lambda x: order.index(index[x].item.type)):\n record = index[key]\n geo_id = offset(record.id)\n geo_item = record.item\n geo_parent_id = geo_item.parent and offset(geo_item.parent.id) # can be None\n geo_type = geo_item.type\n yield [geo_id, geo_parent_id, geo_type, *iter(geo_item)]", "def split_file(filename):\n \n \n#tree = ElementTree.ElementTree()\n#root = ElementTree.Element(\"root\")\n#a = ElementTree.Element(\"a\")\n#a.text = \"1\"\n#root.append(a)\n#tree._setroot(root)\n#tree.write(\"sample.xml\" \n\n \n find_counter = 0\n check_counter = 0 \n tree_file = files()\n #outfile = next(tree_file)\n \n \n with open(filename,mode =\"r\") as file :\n \n for line in file :\n \n if line.startswith(\"<?xml\"):\n outfile = next(tree_file)\n outfile.write(line)", "def __manage_tree(self):\n for pre, fill, node in RenderTree(self.tree):\n if node.name is 'count':\n logger.info(\n \"Tree info %s%s: %s %s p/s attack: %s\",\n pre, node.name, node.value, node.pps, node.attack)\n else:\n logger.info(\"Pre - [%s], Fill - [%s], Node - [%s]\",\n pre, fill, node.name)", "def extracter(spreadsheet, column_name):\n \tprint header, \"Running the extracter.\"\n \troot=Tkinter.Tk()\n\troot.withdraw()\n\troot.update()\n \tinput_folder=tkFileDialog.askdirectory(title=\"Inputfolder: Please choose a directory that contains your corpus files\")\n \troot=Tkinter.Tk()\n\troot.withdraw()\n\troot.update()\n \toutput_folder=tkFileDialog.askdirectory(title=\"Outputfolder: Please choose a directory to copy files into\")\n \tprint header, \"Copying files from '{}' to '{}'.\".format(input_folder, output_folder)\n \t#collecting input files\n \tinputfiles=[]\n\tprint \"Locating files.\"\t\n\tfor dirpath, subdirs, files in os.walk(input_folder):\n\t\tfor f in files:\n\t\t\tinputfiles.append(os.path.join(dirpath, f))\n\t\t\tif len(inputfiles) in [1000,2000,4000,8000,1600,24000]:\n\t\t\t\tprint \"{} 
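__manage_tree above walks an anytree structure with RenderTree; the rendering idiom on its own (assumes the anytree package; node names are examples):

from anytree import Node, RenderTree

root = Node("root")
Node("child-a", parent=root)
Node("child-b", parent=root)
for pre, _fill, node in RenderTree(root):
    print(f"{pre}{node.name}")  # pre carries the tree-drawing prefix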
files processed, still working.\".format(len(inputfiles))\n\tprint \"Found {} files.\".format(len(inputfiles))\n \t#read from spreadsheet\n \t# with open(spreadsheet, \"r\") as spreadsheet:\n# \t\tspreadsheet=pandas.read_csv(spreadsheet, encoding=\"utf-8\")\n \tnumbers_to_be_extracted= spreadsheet[column_name].unique()\n \tprint header, \"Gilbert numbers to be extracted:\"\n \tprint \",\".join([unicode(i) for i in numbers_to_be_extracted])\n\t#copying speaker files\n\tprint header, \"Copying speaker files.\"\n\tspeakerfiles=[f for f in inputfiles if re.match(\".*\\.txt\", os.path.split(f)[1])]\n\tos.mkdir(os.path.join(output_folder, \"speakers\"))\n\tfor s in speakerfiles:\n\t\tshutil.copy2(s, os.path.join(output_folder, \"speakers\"))\n \t#finding relevant input files\n \tresult=[]\n\tfor number in numbers_to_be_extracted:\n\t\tprint \"Processing {}, creating folder '{}'.\".format(number, number)\n\t\tos.mkdir(os.path.join(output_folder, unicode(number)))\n\t\tregex=\"(\\d+)-(\\d+)-(\\d+)-\"+number.astype('U')+\"-(\\D+)\\.wav\"\n \t\tfindings= [f for f in inputfiles if re.match(regex, os.path.split(f)[1])]\n \t\tresult= result+findings\n \t\tfor find in findings:\n \t\t\tshutil.copy2(find, os.path.join(output_folder, unicode(number), os.path.split(find)[1]))\t\n \tprint header, \"{} files have been copied to {}.\".format(len(result), output_folder)", "def get_biosphere_2_3_name_migration_data():\n\n ws = get_sheet(\n dirpath / \"lci\" / \"ecoinvent elementary flows 2-3.xlsx\", \"ElementaryExchanges\"\n )\n\n def to_exchange(obj):\n obj[0][3] = u\"biosphere\"\n return obj\n\n def strip_unspecified(one, two):\n if two == \"unspecified\":\n return (one,)\n else:\n return (one, two)\n\n data = [\n (\n [\n ws.cell(row=row + 1, column=2).value, # Old name\n # Categories\n strip_unspecified(\n ws.cell(row=row + 1, column=10).value,\n ws.cell(row=row + 1, column=11).value,\n ),\n normalize_units(ws.cell(row=row + 1, column=7).value),\n u\"emission\", # Unit\n ],\n {\"name\": ws.cell(row=row + 1, column=9).value},\n )\n for row in range(1, ws.max_row)\n if ws.cell(row=row + 1, column=2).value\n and ws.cell(row=row + 1, column=9).value\n and ws.cell(row=row + 1, column=2).value != ws.cell(row=row + 1, column=9).value\n ]\n data = copy.deepcopy(data) + [to_exchange(obj) for obj in data]\n\n # Water unit changes\n data.extend(\n [\n (\n (\"Water\", (\"air\",), \"kilogram\", \"biosphere\"),\n {\"unit\": \"cubic meter\", \"multiplier\": 0.001},\n ),\n (\n (\n \"Water\",\n (\"air\", \"non-urban air or from high stacks\"),\n \"kilogram\",\n \"biosphere\",\n ),\n {\"unit\": \"cubic meter\", \"multiplier\": 0.001},\n ),\n (\n (\n \"Water\",\n (\"air\", \"lower stratosphere + upper troposphere\"),\n \"kilogram\",\n \"biosphere\",\n ),\n {\"unit\": \"cubic meter\", \"multiplier\": 0.001},\n ),\n (\n (\n \"Water\",\n (\"air\", \"urban air close to ground\"),\n \"kilogram\",\n \"biosphere\",\n ),\n {\"unit\": \"cubic meter\", \"multiplier\": 0.001},\n ),\n ]\n )\n\n return {\"fields\": [\"name\", \"categories\", \"unit\", \"type\"], \"data\": data}", "def parseXML(xml_file, xml_file2):\r\n #tree = ET.ElementTree(file=xml_file)\r\n tree = ET.ElementTree(file=xml_file)\r\n #print (tree.getroot())\r\n root = tree.getroot()\r\n\r\n #tree2 = ET.ElementTree(file=xml_file2)\r\n tree2 = ET.ElementTree(file=xml_file2)\r\n root2 = tree2.getroot()\r\n\r\n #print (\"tag=%s, attrib=%s\" % (root.tag, root.attrib))\r\n from prettytable import PrettyTable\r\n t = PrettyTable(['N','Component', env, env2])\r\n count=1\r\n for 
child in root:\r\n for child2 in root2: \r\n if child.get('name') == child2.get('name'): \r\n if child.get('version') != child2.get('version'):\r\n if stg_filter == 1: \r\n if child.get('name')[:7].find(\"STAGING\") != 0:\r\n #print(child.get('name')[:7].find(\"STAGING\"))\r\n #print (\"---------STABLE-------\", child.get('name'), \"-->\" , child.get('version'), \"---------PROD-------\",child2.get('name'), \"-->\" , child2.get('version'))\r\n #print (child2.get('name'), \"------->\" , child2.get('version'))\r\n #print(\"hola\")\r\n #t.add_row([child.get('name'), child.get('version'), child2.get('version')])\r\n t.add_row([count,child.get('name'), child.get('version'), child2.get('version')])\r\n #t.add_row(['Bob', 19])\r\n count=count+1\r\n else:\r\n #print (\"---------STABLE-------\", child.get('name'), \"-->\" , child.get('version'), \"---------PROD-------\",child2.get('name'), \"-->\" , child2.get('version'))\r\n #print (child2.get('name'), \"------->\" , child2.get('version'))\r\n #print(\"hola\")\r\n #t.add_row([child.get('name'), child.get('version'), child2.get('version')])\r\n t.add_row([count,child.get('name'), child.get('version'), child2.get('version')])\r\n #t.add_row(['Bob', 19])\r\n count=count+1 \r\n print (t)", "def get_texts_from_Excel(file_name_excel, corpus_dir):\n #Creates an object of type Book from xlrd.book object\n try:\n wb = xlrd.open_workbook(filename=file_name_excel, encoding_override=\"utf-8\")\n except xlrd.XLRDError:\n print \"The file at the location {} is not a valid excel format\".format(file_name_excel)\n sys.exit()\n sheet = wb.sheet_by_index(0)\n texts = []\n text_location_dict = {}\n try:\n for row in range(1,sheet.nrows):\n row_dict = {}\n for col in range(sheet.ncols):\n if sheet.cell(row,col).ctype == 3: # 1 is type text, 3 xldate\n date_tuple = xlrd.xldate_as_tuple(sheet.cell_value(row,col), wb.datemode)\n date_py = datetime.datetime(*date_tuple)\n row_dict.update({sheet.cell_value(0,col): date_py}) # a datetime.datetime obj is stored\n else:\n row_dict.update({sheet.cell_value(0,col):sheet.cell_value(row,col)})\n unique_name = str(row_dict[TXT_ID])\n t = TxtItemLetterExcel(unique_name, **row_dict)\n \n if t.unique_name not in text_location_dict:\n t.add_page(getattr(t, PAGE_COL), getattr(t, TIMESTAMP_COL), getattr(t, TRANSCRIPTION_COL)) #note: has to be tested if attributes are correctly imported!\n texts.append(t)\n #dictionary to map text ids with object location - for quick access of individual items\n text_location_dict[t.unique_name] = len(texts)-1\n else:\n # l.Translation - 'Translation' is the name that was given to the column in the Excel file - if the name changes the attribute will change too\n texts[text_location_dict[t.unique_name]].add_page(getattr(t, PAGE_COL), getattr(t, TIMESTAMP_COL), getattr(t, TRANSCRIPTION_COL))\n except KeyError:\n print \"KeyError: possible cause - column names in settings file are not found in the excel source file\"\n sys.exit()\n #add a txt file folder to each object\n file_path = corpus_dir + os.sep + \"txt\"\n for txt_item in texts:\n file_name = txt_item.unique_name + \".txt\"\n txt_item.add_txt_file(file_path, file_name)\n return texts, text_location_dict", "def _make_treeview(self):\n _return = False\n\n _model = gtk.TreeStore(\n gtk.gdk.Pixbuf, gobject.TYPE_INT, gobject.TYPE_STRING,\n gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_FLOAT,\n gobject.TYPE_FLOAT, gobject.TYPE_FLOAT, gobject.TYPE_FLOAT,\n gobject.TYPE_INT, gobject.TYPE_INT, gobject.TYPE_STRING)\n 
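The PrettyTable usage inside parseXML above, on its own (assumes the prettytable package; component names and versions are made up):

from prettytable import PrettyTable

t = PrettyTable(["N", "Component", "stable", "prod"])
t.add_row([1, "auth-service", "1.4.2", "1.4.1"])
t.add_row([2, "billing", "2.0.0", "2.0.0"])
print(t)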
self.treeview.set_model(_model)\n\n for i in range(10):\n _column = gtk.TreeViewColumn()\n if i == 0:\n _cell = self._make_cell('pixbuf', False, 0, _model)\n _cell.set_property('xalign', 0.5)\n _column.pack_start(_cell, False)\n _column.set_attributes(_cell, pixbuf=0)\n\n _cell = self._make_cell('text', False, 1, _model)\n _column.pack_start(_cell, True)\n _column.set_attributes(_cell, text=1)\n _column.set_visible(True)\n elif i == 1:\n _cell = self._make_cell('text', True, 2, _model)\n _column.pack_start(_cell, True)\n _column.set_attributes(_cell, text=2)\n\n _cell = self._make_cell('text', True, 3, _model)\n _column.pack_start(_cell, True)\n _column.set_attributes(_cell, text=3, visible=11)\n _column.set_visible(True)\n elif i in [2, 3, 4]:\n _cell = self._make_cell('text', True, i + 2, _model)\n _column.pack_start(_cell, True)\n _column.set_attributes(_cell, text=i + 2)\n _column.set_visible(True)\n elif i in [5, 6]:\n _cell = self._make_cell('text', True, i + 2, _model)\n _cell.connect('edited', self._do_edit_cell, i + 2, _model)\n _column.pack_start(_cell, True)\n _column.set_attributes(_cell, text=i + 2, visible=10)\n _column.set_visible(True)\n else:\n _cell = self._make_cell('text', False, i + 2, _model)\n _column.pack_start(_cell, True)\n _cell = self._make_cell('text', False, i + 2, _model)\n _column.pack_start(_cell, True)\n _cell = self._make_cell('text', False, i + 2, _model)\n _column.pack_start(_cell, True)\n _cell = self._make_cell('text', False, i + 2, _model)\n _column.pack_start(_cell, True)\n _cell = self._make_cell('text', False, i + 2, _model)\n _column.pack_start(_cell, True)\n\n _column.set_visible(False)\n\n _column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)\n self.treeview.append_column(_column)\n\n return _return", "def make_filiation(self):\n if self.cells is not None:\n filiate_from_bpointer(self.cells)\n# for cell in self.cells:\n# childs = []\n# for cc in self.cells:\n# if cc.bpointer == cell.identifier:\n# childs.append(cc)\n# cc.parent = cell\n# cc.set_division_event()\n# cell.childs = childs\n return", "def create_index_row_logical_model(record, directory):\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(\"<b></b>\", 'lxml')\n if record[\"supplement\"] == \"\\t\\t\\t\" or record[\"supplement\"] == \"\\t\":\n tr = soup.new_tag(\"tr\")\n td_ic_name = soup.new_tag(\"td\")\n td_ic_name[\"data-order\"] = record[\"class name\"]\n if record[\"stereotype\"] != \"missing data\": #The record is a class\n filename = classname_to_filename(str(record['class name']))\n url = \"logical-model/\"+filename\n text = record[\"class name\"]\n print(text)\n new_link = soup.new_tag(\"a\")\n new_link['href'] = url\n new_link['target'] = \"_blank\"\n new_link.string = text\n td_ic_name.insert(1,new_link)\n tr.insert(1,td_ic_name)\n else: #The record is a property\n td_ic_name.string = record[\"class name\"]\n tr.insert(1,td_ic_name)\n \n td_dc_name = soup.new_tag(\"td\")\n td_dc_name[\"data-order\"] = str(record[\"property name\"])\n if record[\"stereotype\"] == \"missing data\": #The record is a property\n filename = str(record['class name'])+\".html#\"+str(record['property name'])\n filename = filename.replace(\"/\", \"-\")\n filename = filename.replace(\"*\", \"-\")\n filename = filename.replace(\" \", \"\")\n filename = filename.replace(\"\\t\", \"\")\n filename = filename.replace(\"\\n\", \"\")\n url = directory+filename\n text = str(record[\"property name\"])\n print(text)\n new_link = soup.new_tag(\"a\")\n new_link['href'] = url\n new_link['target'] = \"_blank\"\n 
new_link.string = text\n td_dc_name.insert(1,new_link)\n tr.insert(2,td_dc_name)\n else: #The record is a class\n td_dc_name.string = \"-\"\n tr.insert(2,td_dc_name)\n\n if record[\"definition\"] != \"\":\n td_def = soup.new_tag(\"td\")\n td_def.string = str(record[\"definition\"])\n tr.insert(3,td_def)\n \n if record[\"type\"] != \"missing data\":\n td_def = soup.new_tag(\"td\")\n td_def.string = str(record[\"type\"])\n tr.insert(4,td_def)\n else:\n td_def = soup.new_tag(\"td\")\n td_def.string = \"-\"\n tr.insert(4,td_def) \n return tr\n else:\n return None", "def tree(cls):\n result = []\n for i_name, i_feature in cls.feature_registry.items():\n if i_feature.filename:\n result.append((i_name, i_feature.filename, i_feature.filename))\n return result", "def traverse_worksheet(sheet):\n laptops = {}\n projectors = {}\n projection_screens = {}\n speakers = {}\n document_cameras = {}\n dvd_players = {}\n vcrs = {}\n\n for cnt, row in enumerate(sheet.rows):\n if cnt > 10 and cnt <= 17:\n if status_tag(row[3].value) != DEPLOYED:\n laptops.update({row[0].value: {\"model\": row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n laptops.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n if cnt >= 20 and cnt <=28:\n if status_tag(row[3].value) != DEPLOYED:\n projectors.update({row[0].value: {\"model\": row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n projectors.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n if cnt >= 31 and cnt <= 34:\n if status_tag(row[3].value) != DEPLOYED:\n projection_screens.update({row[0].value: {\"model\": row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n projection_screens.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n if cnt >= 37 and cnt <= 40:\n if status_tag(row[3].value) != DEPLOYED:\n speakers.update({row[0].value: {\"model\": row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n speakers.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n if cnt >= 43 and cnt <= 44:\n if status_tag(row[3].value) != DEPLOYED:\n document_cameras.update({row[0].value: {\"model\": row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n document_cameras.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n if cnt == 47:\n if status_tag(row[3].value) != DEPLOYED:\n dvd_players.update({row[0].value: {\"model\": row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n dvd_players.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n if cnt >= 50 and cnt <=51:\n if status_tag(row[3].value) != DEPLOYED:\n vcrs.update({row[0].value: {\"model\": 
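create_index_row_logical_model above assembles table rows with BeautifulSoup's new_tag/insert; the core idiom (the href and link text are placeholders):

from bs4 import BeautifulSoup

soup = BeautifulSoup("<b></b>", "lxml")
tr = soup.new_tag("tr")
td = soup.new_tag("td")
link = soup.new_tag("a", href="logical-model/item.html", target="_blank")
link.string = "Item"
td.insert(1, link)  # insert(position, element), as in the snippet
tr.insert(1, td)
print(tr)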
row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value)}})\n else:\n vcrs.update({row[0].value: {\"model\":row[1].value,\n \"servicetag\": row[2].value,\n \"status\": status_tag(row[3].value),\n \"deployedto\": row[4].value,\n \"ticket\": row[5].value}})\n\n return dict(laptop=laptops, projector=projectors,\n projection_screen=projection_screens,\n speaker=speakers, document_camera=document_cameras,\n dvd_player=dvd_players, vcr=vcrs)", "def process_data(self, excel_file, output_title):\n df = read_excel(excel_file)\n labels = df.columns.values.tolist()\n title = f\"{labels[1]} vs {labels[0]}\"\n data = []\n for label in labels:\n data.append(df[label].values.tolist())\n\n for callback in self.callbacks:\n callback(title, data, labels, output_title)", "def preOrder(self,root):\r\n if root is None:\r\n return\r\n else:\r\n print(root.elem, end=\"\\t\")\r\n self.preOrder(root.lchild)\r\n self.preOrder(root.rchild)", "def _set_xls_content(obj, worksheet):\n for row in worksheet.iter_rows(min_row=3, max_col=7, max_row=worksheet.max_row):\n if not row:\n continue\n if row[0].value is None:\n continue\n\n mnemonic, name, is_organisation, address_postal, \\\n address_email, url, orcid = [i.value for i in row]\n\n party = collections.OrderedDict()\n party['mnemonic'] = mnemonic\n party['name'] = name\n party['is_organisation'] = is_organisation in ['yes', 'y']\n party['address_postal'] = address_postal\n party['address_email'] = address_email\n party['url'] = url\n party['orcid'] = orcid\n\n obj['content'].append(party)", "def sort_nodes(self):\n nodes = self._chain.root_node.ordered_subnodes_hierarchy()\n self._chain.nodes = nodes", "def sortby_num(tree, col, descending): # 重新排序 <-- 數字版\n # grab values to sort\n data = [(tree.set(child, col), child) \\\n for child in tree.get_children('')]\n\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n #data.sort(reverse=descending)\n # 數字的排法(但文字部分就無法排序)\n data.sort(key=lambda data: int(data[0]), reverse=descending)\n\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby_num(tree, col, \\\n int(not descending)))", "def process_xlsx(content):\n data = {}\n workbook = xlrd.open_workbook(file_contents=content)\n worksheets = [w for w in workbook.sheet_names() if not w.startswith('_')]\n for worksheet_name in worksheets:\n if worksheet_name.startswith('_'):\n continue\n\n worksheet = workbook.sheet_by_name(worksheet_name)\n\n merged_cells = worksheet.merged_cells\n if len(merged_cells):\n raise MergedCellError(worksheet.name, merged_cells)\n\n worksheet.name = slughifi(worksheet.name)\n headers = make_headers(worksheet)\n worksheet_data = make_worksheet_data(headers, worksheet)\n data[worksheet.name] = worksheet_data\n return data", "def sortby(tree, col, descending):\r\n # grab values to sort\r\n data = [(tree.set(child, col), child) for child in tree.get_children('')]\r\n \r\n #Figure out if this is a float column. 
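The openpyxl row iteration used by _set_xls_content above, reduced to its core (the path and column bounds are placeholders):

from openpyxl import load_workbook

wb = load_workbook("parties.xlsx", read_only=True)
ws = wb.active
for row in ws.iter_rows(min_row=3, max_col=7, values_only=True):
    if row[0] is None:  # skip padding rows, as the parser above does
        continue
    mnemonic, name, is_org, postal, email, url, orcid = row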
If it is,\r\n #transform to float so the ordering will be meaningful\r\n try:\r\n \r\n data = [( float(x[0]), x[1]) for x in data]\r\n \r\n except:\r\n \r\n #Nope!\r\n pass\r\n \r\n # reorder data\r\n data.sort(reverse=descending)\r\n for indx, item in enumerate(data):\r\n tree.move(item[1], '', indx)\r\n\r\n # switch the heading so that it will sort in the opposite direction\r\n tree.heading(col,\r\n command=lambda col=col: sortby(tree, col, int(not descending)))", "def test_sort_and_fill_taxa_summaries(self):\r\n exp = [\r\n (['Even1', 'Even2', 'Even3'],\r\n ['Bacteria;Actinobacteria;Actinobacteria(class);Actinobacteridae',\r\n 'Bacteria;Actinobacteria;Actinobacteria(class);NotARealTaxa',\r\n 'Bacteria;AnotherFakeTaxa',\r\n 'Bacteria;Bacteroidetes/Chlorobigroup;Bacteroidetes;Bacteroidia',\r\n 'Bacteria;Firmicutes;Bacilli;Lactobacillales',\r\n 'Bacteria;Firmicutes;Clostridia;Clostridiales',\r\n 'Bacteria;Firmicutes;Erysipelotrichi;Erysipelotrichales',\r\n 'Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales',\r\n 'Eukarya',\r\n 'No blast hit;Other'],\r\n array([[0.0880247251673, 0.0721968465746, 0.081371761759],\r\n [0., 0., 0.],\r\n [0., 0., 0.],\r\n [0.192137761955, 0.191095101593, 0.188504131885],\r\n [0.0264895739603, 0.0259942669171, 0.0318460745596],\r\n [0.491800007824, 0.526186212556, 0.49911159984],\r\n [0.0311411916592, 0.0184083913576, 0.0282325481054],\r\n [0.166137214246, 0.163087129528, 0.168923372865],\r\n [0., 0., 0.],\r\n [0.00426952518811, 0.00303205147361, 0.0020105109874]])),\r\n (['Even4', 'Even5', 'Even6'],\r\n ['Bacteria;Actinobacteria;Actinobacteria(class);Actinobacteridae',\r\n 'Bacteria;Actinobacteria;Actinobacteria(class);NotARealTaxa',\r\n 'Bacteria;AnotherFakeTaxa',\r\n 'Bacteria;Bacteroidetes/Chlorobigroup;Bacteroidetes;Bacteroidia',\r\n 'Bacteria;Firmicutes;Bacilli;Lactobacillales',\r\n 'Bacteria;Firmicutes;Clostridia;Clostridiales',\r\n 'Bacteria;Firmicutes;Erysipelotrichi;Erysipelotrichales',\r\n 'Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales',\r\n 'Eukarya',\r\n 'No blast hit;Other'],\r\n array([[0., 0., 0.],\r\n [0.99, 0.11, 0.075],\r\n [0.1921, 0.19109, 0.18],\r\n [0.192137761955, 0.191095101593, 0.188504131885],\r\n [0.0264895739603, 0.0259942669171, 0.0318460745596],\r\n [0.491800007824, 0.526186212556, 0.49911159984],\r\n [0.0311411916592, 0.0184083913576, 0.0282325481054],\r\n [0.166137214246, 0.163087129528, 0.168923372865],\r\n [0., 0., 0.],\r\n [0.00426952518811, 0.00303205147361, 0.0020105109874]])),\r\n (['Even7', 'Even8'],\r\n ['Bacteria;Actinobacteria;Actinobacteria(class);Actinobacteridae',\r\n 'Bacteria;Actinobacteria;Actinobacteria(class);NotARealTaxa',\r\n 'Bacteria;AnotherFakeTaxa',\r\n 'Bacteria;Bacteroidetes/Chlorobigroup;Bacteroidetes;Bacteroidia',\r\n 'Bacteria;Firmicutes;Bacilli;Lactobacillales',\r\n 'Bacteria;Firmicutes;Clostridia;Clostridiales',\r\n 'Bacteria;Firmicutes;Erysipelotrichi;Erysipelotrichales',\r\n 'Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales',\r\n 'Eukarya',\r\n 'No blast hit;Other'],\r\n array([[0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [1., 1.],\r\n [0., 0.]]))\r\n ]\r\n\r\n obs = _sort_and_fill_taxa_summaries([self.taxa_summary1,\r\n self.taxa_summary2,\r\n self.taxa_summary3])\r\n self.compare_multiple_level_array(obs, exp)", "def run(self, tree):\r\n self.stashed_nodes = {}\r\n\r\n stack = [tree]\r\n\r\n while stack:\r\n currElement = stack.pop()\r\n insertQueue = []\r\n for child in 
currElement.getchildren():\r\n if child.text and not isinstance(child.text, util.AtomicString):\r\n text = child.text\r\n child.text = None\r\n lst = self.__processPlaceholders(self.__handleInline(\r\n text), child)\r\n stack += lst\r\n insertQueue.append((child, lst))\r\n if child.tail:\r\n tail = self.__handleInline(child.tail)\r\n dumby = util.etree.Element('d')\r\n tailResult = self.__processPlaceholders(tail, dumby)\r\n if dumby.text:\r\n child.tail = dumby.text\r\n else:\r\n child.tail = None\r\n pos = currElement.getchildren().index(child) + 1\r\n tailResult.reverse()\r\n for newChild in tailResult:\r\n currElement.insert(pos, newChild)\r\n if child.getchildren():\r\n stack.append(child)\r\n\r\n if self.markdown.enable_attributes:\r\n for element, lst in insertQueue:\r\n if element.text:\r\n element.text = \\\r\n inlinepatterns.handleAttributes(element.text, \r\n element)\r\n i = 0\r\n for newChild in lst:\r\n # Processing attributes\r\n if newChild.tail:\r\n newChild.tail = \\\r\n inlinepatterns.handleAttributes(newChild.tail,\r\n element)\r\n if newChild.text:\r\n newChild.text = \\\r\n inlinepatterns.handleAttributes(newChild.text,\r\n newChild)\r\n element.insert(i, newChild)\r\n i += 1\r\n return tree", "def get_data(tree_elem):\n fly_lst = []\n for element in tree_elem:\n for elem in element.xpath('td/label/div[1]/span'):\n fly_dict = dict()\n fly_info_lst = [item.strip() for item in elem.xpath('@title')[0].split(',')]\n class_cost_lst = fly_info_lst[3].split(':')\n fly_dict['dep/arv'] = fly_info_lst[1]\n fly_dict['dur'] = fly_info_lst[2]\n fly_dict['class'] = class_cost_lst[0]\n fly_dict['cost'] = get_price(class_cost_lst[1])\n fly_lst.append(fly_dict)\n return fly_lst", "def excel_fun_read(file_name, template_name, template_location, counter) :\r\n for list_number in range(1, 4) :\r\n inputWorkbook = xlrd.open_workbook(file_name)\r\n inputWorksheet = inputWorkbook.sheet_by_index(list_number)\r\n rows = inputWorksheet.nrows\r\n cols = inputWorksheet.ncols\r\n print(f'{rows} Rows in the file\\t') # <- get rows number starts from 0\r\n print(f'{cols} Cols in the file\\n') # <- get coloms number starts from 0\r\n dictionary = {1 : 'ATR', 2 : 'ESS Hot cycle 1', 3 : 'ESS Cold cycle 1', 4 : 'ESS Hot cycle 2',\r\n 5 : 'ESS Cold cycle 2'}\r\n if cols == 9 :\r\n print('next file')\r\n if cols == 12 or cols == 9 :\r\n cols = 8\r\n sub = 2\r\n else :\r\n cols = 12\r\n sub = 3\r\n for excel_row in range(1, sub) :\r\n sn = int(inputWorksheet.cell_value(0, cols))\r\n print(f'working on 000{sn}.xlsx') # <- Indicates which file is open\r\n TestLocation_list = [] # <- Creation of list\r\n PassFail_col_list = [] # <- Creation of list\r\n for i in range(rows) :\r\n # Follow the H colom check if there is 'PASS'/'FAIL' or empty cell\r\n # If empty cell skip it until the end of the excel file\r\n if inputWorksheet.cell_value(i, cols - excel_row) == 'PASS' or inputWorksheet.cell_value(i,\r\n cols - excel_row) == 'FAIL' or inputWorksheet.cell_value(\r\n i, cols - excel_row) == 'N/T' :\r\n TestLocation_list.append(i)\r\n PassFail_col_list.append(str(inputWorksheet.cell_value(i, cols - excel_row)))\r\n\r\n location_list, len_of_every_test_list = Create_2_lists_of_locations(TestLocation_list, list_number)\r\n pass_fail_list = sort_list_of_pass_and_fail(len_of_every_test_list, location_list, file_name, list_number,\r\n cols, PassFail_col_list, excel_row)\r\n\r\n # print(f'''It's the end of {list_number} in file 000{sn} excel_row = {excel_row}''')\r\n print(f'''It's the end of {dictionary.pop(counter)} 
in file 000{sn}\\n''')\r\n write_to_excel(sn, template_location, pass_fail_list, counter)\r\n counter += 1\r\n\r\n print('''it's the end of the loop''')", "def transform_ordering(new_document_list):\n doc_list = []\n for page in new_document_list:\n # print(\"PAGE:\")\n page_list = []\n for col_type in page:\n # print(\"\\tCOL_TYPE:\")\n for column in col_type:\n # print(\"\\t\\tCOLUMN:\")\n for group in column:\n # print(\"\\t\\t\\t\" + str(group))\n page_list.append(group)\n doc_list.append(page_list)\n return doc_list", "def _update_ordering(self):\n self._descendants = sorted(self.unordered_descendants(), key=lambda node: node.ord)\n for (new_ord, node) in enumerate(self._descendants, 1):\n node.ord = new_ord", "def post_order(self):\n try:\n if not self.root:\n return \"the tree is empty!\"\n else:\n output = []\n\n def order_tree(node):\n if node.left:\n order_tree(node.left)\n if node.right:\n order_tree(node.right)\n nonlocal output\n output += [node.value]\n return output\n final_out = order_tree(self.root)\n return final_out\n except:\n print(\"something went wrong please try again\")", "def create_index_row_with_supplements(record, directory):\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(\"<b></b>\", 'lxml')\n if record[\"supplement\"] == \"\\t\\t\\t\":\n tr = soup.new_tag(\"tr\")\n td_supplement = soup.new_tag(\"td\")\n tr.insert(1,td_supplement)\n\n td_ic_name = soup.new_tag(\"td\")\n td_ic_name[\"data-order\"] = record[\"class name\"]\n filename = classname_to_filename(str(record['class name']))\n url = directory + filename\n text = record[\"class name\"]\n print(text)\n new_link = soup.new_tag(\"a\")\n new_link['href'] = url\n new_link['target'] = \"_blank\"\n new_link.string = text\n td_ic_name.insert(1,new_link)\n tr.insert(2,td_ic_name)\n \n if record[\"definition\"] != \"\":\n td_def = soup.new_tag(\"td\")\n td_def.string = str(record[\"definition\"])\n tr.insert(3,td_def)\n \n return tr\n elif record[\"supplement\"] == \"\\t\\t\\tEuropean Supplement\":\n tr = soup.new_tag(\"tr\")\n td_supplement = soup.new_tag(\"td\")\n span_supplement = soup.new_tag(\"spam\")\n span_supplement['class'] = \"badge badge-secondary\"\n span_supplement.string = \"European Supplement\"\n td_supplement.insert(1,span_supplement)\n tr.insert(1,td_supplement)\n \n td_ic_name = soup.new_tag(\"td\")\n td_ic_name[\"data-order\"] = record[\"class name\"]\n filename = classname_to_filename(str(record['class name']))\n url = directory + filename\n text = record[\"class name\"]\n print(text)\n new_link = soup.new_tag(\"a\")\n new_link['href'] = url\n new_link['target'] = \"_blank\"\n new_link.string = text\n td_ic_name.insert(1,new_link)\n tr.insert(2,td_ic_name)\n \n if record[\"definition\"] != \"\":\n td_def = soup.new_tag(\"td\")\n td_def.string = str(record[\"definition\"])\n tr.insert(3,td_def)\n \n return tr\n else:\n return None", "def __gen_hierarchy_file(self, layer):\n paula_id = '{}.{}.{}_{}'.format(layer, self.corpus_name, self.name,\n layer)\n self.paulamap['hierarchy'][layer] = paula_id\n E, tree = gen_paula_etree(paula_id)\n\n dominance_edges = select_edges_by(\n self.dg, layer=layer, edge_type=EdgeTypes.dominance_relation,\n data=True)\n span_edges = select_edges_by(\n self.dg, layer=layer, edge_type=EdgeTypes.spanning_relation,\n data=True)\n dominance_dict = defaultdict(lambda: defaultdict(str))\n for source_id, target_id, edge_attrs in dominance_edges:\n if source_id != layer+':root_node':\n dominance_dict[source_id][target_id] = edge_attrs\n\n # in PAULA XML, token spans 
are also part of the hierarchy\n for source_id, target_id, edge_attrs in span_edges:\n if istoken(self.dg, target_id):\n dominance_dict[source_id][target_id] = edge_attrs\n\n # NOTE: we don't add a base file here, because the nodes could be\n # tokens or structural nodes\n slist = E('structList', {'type': layer})\n for source_id in dominance_dict:\n struct = E('struct',\n {'id': str(source_id)})\n if self.human_readable:\n struct.append(Comment(self.dg.node[source_id].get('label')))\n\n for target_id in dominance_dict[source_id]:\n if istoken(self.dg, target_id):\n href = '{}.xml#{}'.format(self.paulamap['tokenization'],\n target_id)\n else:\n href = '#{}'.format(target_id)\n\n rel = E(\n 'rel',\n {'id': 'rel_{}_{}'.format(source_id, target_id),\n 'type': dominance_dict[source_id][target_id]['edge_type'],\n XLINKHREF: href})\n struct.append(rel)\n if self.human_readable:\n struct.append(\n Comment(self.dg.node[target_id].get('label')))\n slist.append(struct)\n tree.append(slist)\n self.files[paula_id] = tree\n self.file2dtd[paula_id] = PaulaDTDs.struct\n return paula_id", "def exportHtmlTables(self, filePath=''):\n if not filePath:\n filePath = QtGui.QFileDialog.getExistingDirectory(QtGui.\n QApplication.activeWindow(),\n _('TreeLine - Export HTML'),\n self.defaultFilePath)\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n oldDir = os.getcwd()\n os.chdir(filePath)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n if len(self.selectedNodes) > 1:\n modelRef = self.selectedNodes[0].modelRef\n dummyFormat = modelRef.formats.addDummyRootType()\n root = treenode.TreeNode(None, dummyFormat.name, modelRef)\n name = os.path.basename(self.defaultFilePath)\n if not name:\n name = treemodel.defaultRootName\n root.setTitle(name)\n for node in self.selectedNodes:\n root.childList.append(copy.copy(node))\n root.childList[-1].parent = root\n else:\n root = self.selectedNodes[0]\n root.exportHtmlTable()\n root.modelRef.formats.removeDummyRootType()\n os.chdir(oldDir)\n return False", "def getHierarchy(unique_name):", "def getHierarchy(unique_name):", "def getHierarchy(unique_name):" ]
[ "0.58093977", "0.55304813", "0.54485965", "0.5410043", "0.53819305", "0.5349107", "0.5269191", "0.5251047", "0.51980996", "0.5178889", "0.51472735", "0.5129128", "0.5119549", "0.50939554", "0.5085807", "0.5049187", "0.5038129", "0.50297076", "0.5020485", "0.49983808", "0.49734384", "0.4949488", "0.4944065", "0.49423715", "0.49324387", "0.49009293", "0.48964632", "0.4881258", "0.4864224", "0.4860404", "0.48492768", "0.4846225", "0.48270237", "0.4823878", "0.4823377", "0.4819792", "0.48189527", "0.48052388", "0.48052388", "0.4803169", "0.47950548", "0.47904494", "0.47893345", "0.47883502", "0.47840738", "0.4762782", "0.47573596", "0.475317", "0.47255215", "0.47198814", "0.47192064", "0.47165525", "0.47030407", "0.47022888", "0.46959975", "0.46945983", "0.46813798", "0.46717674", "0.4670383", "0.46698624", "0.46670914", "0.46613353", "0.46396047", "0.4639368", "0.46384946", "0.46360308", "0.46289018", "0.46117115", "0.46060556", "0.45839116", "0.45811108", "0.4576534", "0.45732668", "0.45694405", "0.4568486", "0.45683298", "0.4560139", "0.4557952", "0.45531753", "0.45477235", "0.454679", "0.45377833", "0.4531408", "0.452742", "0.45261043", "0.4522029", "0.4519455", "0.45168635", "0.45158863", "0.45148686", "0.45131227", "0.45130065", "0.45124325", "0.45117083", "0.45114684", "0.45087463", "0.45050177", "0.45039305", "0.45039305", "0.45039305" ]
0.49841878
20
Creates a variable for the chosen Excel document
def entry_set_excel(self, entry):
    # Ask the user to pick an Excel workbook (.xlsx or .xlsm) and keep the
    # chosen path in the module-level variable `exceldokument`.
    global exceldokument
    exceldokument = filedialog.askopenfilename(filetypes=[("Excel file", "*.xlsx"), ("Excel file", "*.xlsm")])
    # Show the selected path in the tkinter Entry widget.
    entry.delete(0, 'end')
    entry.insert(tk.END, exceldokument)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def documento():\r\n\tpass", "def edit_document():", "def build_document(self):\n pass", "def savedoc():\r\n document.save('QSDoc_{0}_{1}_{2}_{3}.docx'.format(args.server, year, month, day))", "def create(init_document: 'Document') -> 'DocumentArray':", "def _create_document(result_dict):\n document = Document(\n name=result_dict['docname'],\n original_id=result_dict['itemid'],\n doctype=result_dict['doctype'],\n language=result_dict['languageisocode'],\n conclusion=result_dict['conclusion'],\n originatingbody=result_dict['originatingbody'],\n application=result_dict['application'],\n )\n return document", "def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass", "def openDoc (self):\n fileName = QFileDialog.getOpenFileName(self,\n self.tr(\"Open File\"), \"\", \"All documents (*.%s;*.%s;*.%s;*.%s;*.%s;*.%s;*.%s);;Tests abstract (*.%s);;Tests unit (*.%s);;Tests suite (*.%s);;Tests plan (*.%s);;Tests global (*.%s);;Tests config (*.%s);;Tests data (*.%s)\" %\n ( TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE, \n TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE) )\n \n # new in v17.1\n if QtHelper.IS_QT5:\n _fileName, _type = fileName\n else:\n _fileName = fileName\n # end of new\n \n if not len(_fileName):\n return\n \n extension = str(_fileName).rsplit(\".\", 1)[1]\n if not ( extension.lower() in [ TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE,\n TestData.TYPE, TestUnit.TYPE, TestAbstract.TYPE ] ):\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"File not supported\") )\n return\n \n tmp = str(_fileName).rsplit(\"/\", 1)\n path = tmp[0]\n if len(tmp) > 1:\n _filename = tmp[1].rsplit(\".\", 1)[0]\n else:\n _filename = tmp[0].rsplit(\".\", 1)[0]\n self.newTab( path = path, filename = _filename, \n extension = extension, repoDest=UCI.REPO_UNDEFINED)", "def __call__(self, doc):\n return doc", "def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()", "def document(self):\n ...", "def find_document(self):\n pass", "def choose_file(self):\n self.choice = self.client.choose_file()\n\n if self.choice.drive == 'document':\n from docsparser import DocsParser\n self.parser = DocsParser(self.client, self.choice)\n else:\n raise NotImplementedError('{} service not implemented'.format(self.choice.drive))\n\n return self.choice", "def create_document(file_name):\n path = INPUT_DIR+file_name # assemble the file descriptor\n file = open(path) # open in read mode\n doc = Document() # create a new document\n # add the title field\n doc.add(StringField(\"title\", input_file, Field.Store.YES))\n # add the whole book\n doc.add(TextField(\"text\", file.read(), Field.Store.YES))\n file.close() # close the file pointer\n return doc", "def _create_template( service):\n\n return DOCS.documents().create(body=template_page_setup).execute().get('documentId')", "def input_file_docx(str_write, str_answer):\r\n paragraph = dti.add_paragraph(str_write)\r\n paragraph_format = paragraph.paragraph_format\r\n paragraph_format.space_after = Pt(1.0)\r\n\r\n paragraph = dti1.add_paragraph(str_answer)\r\n paragraph_format = paragraph.paragraph_format\r\n paragraph_format.space_after = Pt(1.0)", "def get_document(self):\n\t\tif(self.fs.tmp_dir):\n\t\t\tfull_filename = self.fs.tmp_dir + 
os.sep + self.fs.get_document()\n\t\telse:\n\t\t\tfull_filename = self.fs.get_document()\n\t\t\n\t\treturn full_filename", "def _load_document(path, app):\n start_inventor()\n document_type_enum = {\n 12289: 'UnnownDocument',\n 12290: 'PartDocument',\n 12291: 'AssemblyDocument',\n 12292: 'DrawingDocument',\n 12293: 'PresentationDocument',\n 12294: 'DesignElementDocument',\n 12295: 'ForeignModelDocument',\n 12296: 'SATFileDocument',\n 12297: 'NoDocument',\n }\n try:\n app.Documents.Open(str(path))\n document_type = document_type_enum[app.ActiveDocumentType]\n doc = win32com.client.CastTo(app.ActiveDocument, document_type)\n print(doc, document_type)\n return doc\n except:\n print('unable to load file')\n return None", "def _get_doc(results, index):\n return results[index]", "def create_document(self, output):\n if not os.path.exists(self.template_path):\n raise IOError('Template file not found.')\n\n documents = []\n with open(self.template_path, 'rb') as f:\n data = f.read()\n template = Template(to_unicode(data))\n indent_targets = ['params', 'response_body']\n for v in self.vars:\n if self.template_path.endswith('.rst'):\n for k in indent_targets:\n lines = v[k].split('\\n')\n ret = []\n for i, l in enumerate(lines):\n if i > 0:\n ret.append(' {0}'.format(l).rstrip())\n else:\n ret.append(l)\n v[k] = '\\n'.join(ret)\n\n document = template.substitute(v)\n documents.append(document)\n\n with open(output, 'w') as f:\n f.write('\\n'.join(documents))", "def GetDocument(self):\n return self.file", "def dummy(doc):\r\n return doc", "def open_document(filepath, show=True):\n\t\n\tk = krita.Krita.instance()\n\tprint('Debug: opening %s' % filepath)\n\tdoc = k.openDocument(filepath)\n\tif show:\n\t\tApplication.activeWindow().addView(doc)\n\treturn doc", "def _get_document(self, doc_uid, doc_type, row, mappings):\n # Create document.\n doc = pyesdoc.create(doc_type,\n project=DOC_PROJECT,\n source=DOC_SOURCE,\n version=1,\n uid=doc_uid)\n\n # Assign document dates.\n try:\n doc.meta\n except AttributeError:\n pass\n else:\n doc.meta.create_date = DOC_CREATE_DATE\n doc.meta.update_date = DOC_UPDATE_DATE\n\n # Assign document author.\n try:\n doc.meta.author = DOC_AUTHOR_REFERENCE\n except AttributeError:\n pass\n\n # Set document attributes from mapped worksheet cells.\n for mapping in mappings:\n self._set_document_attribute(doc, row, mapping)\n\n return doc", "def get_document(name):\n document = [d for d in documents if d.name == name]\n if len(document) > 0:\n return document[0]", "def CreateNewFile(self):\n\t\tself.acad.Documents.Add()", "def document_new():\n\n t = request.form['type']\n if t == 'book':\n doc = Book(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n edition=request.form['edition'],\n publisher=request.form['publisher'],\n publishment_year=request.form['publishment_year'],\n bestseller='bestseller' in request.form,\n reference='reference' in request.form\n )\n elif t == 'av':\n doc = AVMaterial(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors'])\n )\n elif t == 'article':\n doc = JournalArticle(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n issue_editor=request.form['issue_editor'],\n 
issue_publication_date=request.form['issue_publication_date'],\n journal=request.form['journal']\n )\n\n for i in range(int(request.form['copies'])):\n dc = DocumentCopy(document=doc)\n\n db.session.add(doc)\n db.session.commit()\n\n log(session['login'], 'created', 'document {}'.format(doc.id))\n\n # TODO\n return redirect('/admin/documents')", "def _doc_create(type, data):\n doc = dict(data)\n doc.update({'model_type': type})\n return doc", "def _getDocumentType(self):\n\n fileName, fileExtension = os.path.splitext(self.sourceFileName)\n documentType = None\n\n for knownType in list(TextRepresentation.KNOWNTYPES.keys()):\n if knownType == fileExtension[1:]:\n documentType = knownType\n\n if documentType is None:\n raise Exception(\"Unknown document type: %s\" % fileExtension[1:])\n \n return documentType", "def getDocumentId(self): #$NON-NLS-1$\r", "def getdoc():\n\n\timport webnotes\n\tfrom webnotes.utils import cint\n\t\n\tform = webnotes.form_dict\n\tdoctype, docname = form.get('doctype'), form.get('name')\n\tprefix = cint(form.get('from_archive')) and 'arc' or 'tab'\n\n\tif not (doctype and docname):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tdoclist = []\n\t# single\n\tdoclist = load_single_doc(doctype, docname, (form.get('user') or webnotes.session['user']), prefix)\n\t\n\t# load doctype along with the doc\n\tif form.get('getdoctype'):\n\t\timport webnotes.model.doctype\n\t\tdoclist += webnotes.model.doctype.get(doctype)\n\n\t# tag as archived\n\tif prefix == 'arc':\n\t\tdoclist[0].__archived=1\n\n\twebnotes.response['docs'] = doclist", "def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)", "def get_document(self):\n return self.document", "def make_document(source_path=\"notset\") -> nodes.document:\n settings = OptionParser(components=(RSTParser,)).get_default_values()\n return new_document(source_path, settings=settings)", "def setup_document(document_name=\"fSCAD-Preview\"):\n preview_doc = None\n saved_camera = None\n saved_units = None\n for document in app().documents:\n if document.name == document_name:\n preview_doc = document\n break\n if preview_doc is not None:\n preview_doc.activate()\n saved_camera = app().activeViewport.camera\n saved_units = design().fusionUnitsManager.distanceDisplayUnits\n preview_doc.close(False)\n\n preview_doc = app().documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)\n preview_doc.name = document_name\n preview_doc.activate()\n if saved_camera is not None:\n is_smooth_transition_bak = saved_camera.isSmoothTransition\n saved_camera.isSmoothTransition = False\n app().activeViewport.camera = saved_camera\n saved_camera.isSmoothTransition = is_smooth_transition_bak\n app().activeViewport.camera = saved_camera\n if saved_units is not None:\n design().fusionUnitsManager.distanceDisplayUnits = saved_units\n design().designType = adsk.fusion.DesignTypes.DirectDesignType", "def create_entity_claim_input_file_doc_ret():\n claim_doc = open(r\"C:\\study\\technion\\MSc\\Thesis\\Y!\\rawClaim_SW.txt\").read().strip()\n \"remove the stop words from the claims\"\n SW_doc = r\"C:\\study\\technion\\MSc\\Thesis\\Y!\\stopWords.xml\"\n stopWords_list = []\n claims_no_SW_dict = {}\n with open(SW_doc, 'r') as f:\n line = f.readline()\n while line !=\"\":\n if \"<word>\" in line:\n 
stopWords_list.append(line.split(\"<word>\")[1].split(\"</word>\")[0])\n line = f.readline()\n \n for i,line in enumerate(claim_doc.split(\"\\n\")):\n clmLMdocLM_doc_ret_query_file = open(\"LMdocLM_doc_ret_query_file_clm_\"+str(i+1),\"wb\")\n clmLMdocLM_doc_ret_query_file.write(\"<parameters>\\n\")\n curr_claim_words = line.split(\"|\")[1].lower().split()\n curr_entity_words = line.split(\"|\")[0].lower().split()\n noSW_claim = \"\"\n noSW_entity = \"\"\n for word in curr_claim_words:\n if word not in stopWords_list: \n noSW_claim += word+\" \"\n for word in curr_entity_words:\n if word not in stopWords_list: \n noSW_entity += word+\" \"\n# clmLMdocLM_doc_ret_query_file.write(\"<query><number>\"+str(i+1)+\"</number><text>\"+noSW_entity+\"|\"+noSW_claim+\"</text></query>\\n\")\n# clmLMdocLM_doc_ret_query_file.write(\"</parameters>\")\n# clmLMdocLM_doc_ret_query_file.close()\n claims_no_SW_dict[str(i+1)] = (noSW_entity,noSW_claim)\n save_pickle(\"claims_no_SW_dict\", claims_no_SW_dict)", "def test() -> None:\n docx2python(\"resources/example.docx\")", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def create_document(lines_of_document):\n\n Document = collections.namedtuple('Document', ['header', 'body'])\n document = Document(body=' '.join(lines_of_document[1:]),\n header=lines_of_document[0])\n\n return document", "def getbyname(self, name, doctype='experiment'):\n\n if doctype not in self.documents:\n self.documents[doctype] = esd.search(self.source, doctype)\n return self.documents[doctype].load_document(name)", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def test_loading_document(self):", "def documents(pmid_23982599, civic_aid6_document):\n return [pmid_23982599, civic_aid6_document]", "def save( self, request, idx ) :\n\n if idx != 'None' :\n obj = models.Document.objects.get( id = idx )\n obj.element = self.cleaned_data['element']\n obj.type = self.cleaned_data['type']\n obj.name = self.cleaned_data['name']\n\n else :\n obj = models.Document.objects.get_or_create(element = self.cleaned_data['element'],\n type = self.cleaned_data['type'],\n name = self.cleaned_data['name'],\n author = request.user )[0]\n\n obj.link = self.cleaned_data['link']\n obj.save()\n\n return obj", "def GetDocument(self, *args, **kwargs):\n pass", "def GetOpenedFile(self,file):\n\t\tif isinstance(file,str):\n\t\t\tindex = self.OpenedFilenames.index(file)\n\t\telif isinstance(file,int):\n\t\t\tindex=file\n\t\telse:\n\t\t\traise PycomError('Type of file in GetOpenedFile is wrong ')\n\t\treturn self.acad.Documents.Item(index)", "def _getForDocument (self):\n return self.__forDocument", "def xelatex_document(doc_args, template_file, field_name, output_dir='.'):\n # input data\n input_data = doc_args\n\n # template doc\n template_doc = XeLateXDocument(template_file)\n\n # output file name\n field_val = input_data[field_name].replace(' ', '')\n\n file_extension = get_extension(template_file)\n basename = path.basename(template_file).replace(file_extension, '')\n\n file_name = basename + '_' + field_val\n file_path = path.join(output_dir, file_name + '.pdf')\n\n # make output folder\n if not os.path.exists(output_dir):\n os.mkdir(outdir)\n\n # fill the template\n template_doc.fill(doc_args)\n\n # save into PDF\n template_doc.render(file_path)\n\n # clean up LateX mess\n cleanup_docstamp_output(output_dir)\n\n return file_path", "def _load(self):\n service_manager = 
helper_util.getServiceManager(self.hostname, self.port,\n self.uno_path,\n self.office_binary_path)\n desktop = service_manager.createInstance(\"com.sun.star.frame.Desktop\")\n uno_url = self.systemPathToFileUrl(self.document_url)\n uno_document = desktop.loadComponentFromURL(uno_url, \"_blank\", 0, ())\n if not uno_document:\n raise AttributeError(\"This document can not be loaded or is empty\")\n if self.refresh:\n # Before converting to expected format, refresh dynamic\n # value inside document.\n dispatcher = service_manager.createInstance(\"com.sun.star.frame.DispatchHelper\")\n for uno_command in ('UpdateFields', 'UpdateAll', 'UpdateInputFields',\n 'UpdateAllLinks', 'UpdateCharts',):\n dispatcher.executeDispatch(uno_document.getCurrentController().getFrame(),\n '.uno:%s' % uno_command, '', 0, ())\n module_manager = service_manager.createInstance(\"com.sun.star.frame.ModuleManager\")\n self.document_type = module_manager.identify(uno_document)\n self.document_loaded = uno_document", "def getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension):\n DOCUMENT_DATE = regex.getDateDoc(text)\n #getting probable author\n AUTHOR = regex.getAuthor(text)\n strModif = file[len(pathFolder):-len(extension)] #getting rid of path and extension\n documentName = strModif.split(\"_\")\n patientIpp = documentName[0]\n documentId = documentName[1]\n #get patient id from patientIpp\n conn = db.create_connection(DATABASE)\n patient_num = db.select_patient_id(conn, patientIpp.lstrip('0')) #removing leading 0 causing troubles in select\n conn.close()\n return (documentId, patient_num, DOCUMENT_ORIGIN_CODE, DOCUMENT_DATE, date.today().strftime(\"%m/%d/%y\"), text, AUTHOR)", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def examine_document(self, action):\n doc = action[1] # this should have a document ID so we can pull out the correct document text\n screen = DocScreen('doc_title', 'doc_content goes here')\n\n return screen", "def get_document_by_name(label, doc_type):\n return Documents.query.filter_by(type=doc_type, label=label).first()", "def handle_inputs(args):\n category = args['template']\n template = args['t']\n \n if category == 'klangregie':\n file_to_open = 'Maximiliano_Estudies-Rechnung_Klangregie-2020.ods'\n elif category == 'musikfabrik':\n file_to_open = 'Maximiliano_Estudies-Rechnung_Musikfabrik-2020.ods'\n elif category == 'otros+mwst':\n file_to_open = 'Maximiliano_Estudies-Otros+MwSt-2020_copy.ods'\n else:\n file_to_open = \"no_file\"\n\n return file_to_open", "def document_view(index_name, doc_type, doc_id):\n resp = es.get(index=index_name, doc_type=doc_type, id=doc_id)\n document = resp[\"_source\"]\n print(document)", "def get_document(name):\n doc = _document_registry.get(name, None)\n if not doc:\n # Possible old style name\n single_end = name.split(\".\")[-1]\n compound_end = \".%s\" % single_end\n possible_match = [\n k for k in _document_registry if k.endswith(compound_end) or k == single_end\n ]\n if len(possible_match) == 1:\n doc = _document_registry.get(possible_match.pop(), None)\n if not doc:\n raise NotRegistered(\n \"\"\"\n `%s` has not been registered in the document registry.\n Importing the document class automatically registers it, has it\n been imported?\n \"\"\".strip()\n % name\n )\n return doc", "def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n 
self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)", "def open(self, path: str, options: str = \"silent\", configuration: str = str()):\n\n if os.path.splitext(path)[1] == \".SLDPRT\":\n type_value = DocumentTypes.PART.value\n elif os.path.splitext(path)[1] == \".SLDASM\":\n type_value = DocumentTypes.ASSEMBLY.value\n elif os.path.splitext(path)[1] == \".SLDDRW\":\n type_value = DocumentTypes.DRAWING.value\n else:\n raise ValueError(\"Incompatible File Type\")\n\n _options = OpenDocOptions[options.upper().replace(\" \", \"_\")].value\n pointer, error, warning = self._opendoc6(\n path, type_value, _options, configuration\n )\n return Doc(pointer), error, warning", "def __init__(self, docx, strict=False):\n self.strict = strict\n document_part = Package.open(docx).main_document_part\n if document_part.content_type != CONTENT_TYPE.WML_DOCUMENT_MAIN:\n tmpl = \"file '%s' is not a Word file, content type is '%s'\"\n raise ValueError(tmpl % (docx, document_part.content_type))\n super().__init__(document_part._element, document_part)", "def document(self) -> str:\n return pulumi.get(self, \"document\")", "def get_document(self, docid):\n raise NotImplementedError", "def open( self, filename ):\r\n #http://www.oooforum.org/forum/viewtopic.phtml?t=35344\r\n properties = []\r\n properties.append( OpenOfficeDocument._makeProperty( 'Hidden', True ) ) \r\n properties = tuple( properties )\r\n self.oodocument = self.openoffice.loadComponentFromURL( uno.systemPathToFileUrl( os.path.abspath( filename ) ), \"_blank\", 0, properties )", "def new_document(klass, name=None, author=None):\n doc = Factory.new_document(klass, author)\n doc.name = name\n doc._osl.id = uuid.uuid4()\n return doc", "def fini_doc(self):\n raise NotImplementedError()", "def __init__(self):\n self.id = 0\n self.file_name = \"\"\n self.content_type = \"\"\n self.versions = []\n self.folder = Folder()\n self.url = \"\" \n\n self.upload_doc = []\n self.description = \"\" \n self.tags = \"\" \n self.notify = 0", "def testMakeDocument(self):\n\n # I've split the wanted result string up into substrings so I can\n # amend it more easily (or so I hope).\n trivial_package = \"\"\"\\\n<document source=\"Package trivial_package\">\n <section class=\"package\" id=\"package-trivial-package\" name=\"package trivial_package\">\n <title>\n Package trivial_package\\n\"\"\"\n\n # The \"xml:space\" attribute is by observation, not prediction\n module_init = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-init\" name=\"module trivial_package.__init__\">\n <title>\n Module trivial_package.__init__\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n A simple docstring.\\n\"\"\"\n\n module_file1 = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-file1\" name=\"module trivial_package.file1\">\n <title>\n Module trivial_package.file1\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n This is the first example file. 
It *does* use reStructuredText.\n <section class=\"class\" id=\"class-trivial-package-file1-fred\" name=\"class trivial_package.file1.fred\">\n <title>\n Class trivial_package.file1.Fred\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n An example class - it announces each instance as it is created.\\n\"\"\"\n\n module_file2 = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-file2\" name=\"module trivial_package.file2\">\n <title>\n Module trivial_package.file2\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n This module is *not* using reStructuredText for its docstrings.\\n\"\"\"\n\n non_python_file = \"\"\"\\\n <section class=\"file\" id=\"file-trivial-package-not-python\" name=\"file trivial_package.not_python\">\n <title>\n File trivial_package.not_python\n <paragraph>\n File \n <literal>\n not_python\n is not a Python module.\\n\"\"\"\n\n sub_package = \"\"\"\\\n <section class=\"package\" id=\"package-trivial-package-sub-package\" name=\"package trivial_package.sub_package\">\n <title>\n Package trivial_package.sub_package\\n\"\"\"\n\n sub_module_init = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-sub-package-init\" name=\"module trivial_package.sub_package.__init__\">\n <title>\n Module trivial_package.sub_package.__init__\\n\"\"\"\n\n wanted_result = (trivial_package + module_init + module_file1 +\n module_file2 + non_python_file + sub_package +\n sub_module_init)\n\n tree = parse_package(\"trivial_package\")\n\n document = make_document(tree)\n\n actual_result = document.pformat()\n\n if wanted_result != actual_result:\n print \"+++++++++++++++++++++++++ WANT\"\n print wanted_result\n print \"+++++++++++++++++++++++++ GOT\"\n print actual_result\n print \"+++++++++++++++++++++++++\"\n\n self.assertEqual(actual_result,wanted_result)", "def get_document_from_xtracta(document_id):\n response = None\n response = requests.post(XTRACTA['url']+\"documents\",\n data={\"submit\":\"Submit\",\\\n \"api_key\":XTRACTA['key'],\\\n \"document_id\":document_id})\n return response", "def document_details(context, document):\n return {'document': document, 'request': context['request']}", "def DocumentType(self, default='article'):\n return self.data.get('document_type', [{}])", "def extract_document(cls, obj_id, obj=None):\n if obj is None:\n obj = cls.get_model().get(id=obj_id)\n return {\n 'id': unicode(obj.id),\n 'init_user': {\n 'email': obj.init_user.email\n },\n 'text': obj.text,\n 'url': obj.get_absolute_url(),\n '_parent': unicode(obj.content_object.pk) # todo use only experiments pk\n }", "def build(self):\n if self.project_folder_name:\n folder_id = self._project_folder_id()\n if self.sub_folder_name:\n folder_id = self._get_sub_folder_id(folder_id)\n\n if folder_id:\n file_id = create_file_in_drive(self.sheet_title, folder_id)\n else:\n file_id = create_file_in_drive(self.sheet_title)\n\n return file_id", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def _initNewEntryDocument(self, atomDoc): #@UnusedVariable #$NON-NLS-1$\r\n pass", "def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()", "def get_doc(self):\n return self.p", "def 
create_ref_file(self):\n id = self.task_record.create_published_output_name()\n ctx = self.block_store.make_local_output(id)\n self.open_ref_contexts[ctx.get_filename()] = ctx\n return ctx.get_filename()", "def GetDoc(index_wd, key, value):\n # init code:\n key = key.lower().strip()\n value = value.strip()\n docid_to_docno_path = os.path.join(index_wd, 'docid_to_docno.p')\n docno_to_data_path = os.path.join(index_wd, 'docno_to_data.p')\n # check to see what key is (docno or docid)\n\n # if docid, need to grab the docid_to_docno from disk\n # Lookup via docid:\n if key == 'docid':\n docid_to_docno = pickle.load(open(docid_to_docno_path, \"rb\"))\n value = int(value)\n if value not in docid_to_docno:\n print(\"The following docid: {} was not found.\".format(value))\n return False\n else:\n docno = docid_to_docno[value]\n #Lookup via docno:\n elif key == 'docno':\n docno = value\n else:\n print(\"The following key: {} is invalid. Pass: 'docid' or 'docno'\".format(key))\n return False\n docno_to_data = pickle.load(open(docno_to_data_path, \"rb\"))\n\n if docno not in docno_to_data:\n print(\"The following docno: {} was not found.\".format(value))\n return False\n\n doc_path = docno_to_data[docno]\n\n basepath = os.path.dirname(doc_path)\n\n if not os.path.exists(basepath):\n print(\"The basepath {} does not exist.\".format(basepath))\n return False\n\n metadata = MetaData(doc_path)\n metadata.load()\n metadata.meta_print()\n return True", "def get_doc(self, dtype, identity):\n if dtype == 'pii':\n doc = FullDoc(sd_pii = identity)\n elif dtype == 'doi':\n doc= FullDoc(doi = identity)\n\n if doc.read(ElsClient(self.API_list[0])):\n pass\n else:\n print (\"Read document failed.\")\n\n return doc", "def define_file() -> str:\n\n print('===SELECT FILE===')\n filenames = next(walk(IMAGES), (None, None, []))[2]\n filenames.append('Exit the program')\n command = menu(filenames)\n if int(command) == len(filenames):\n exit()\n else:\n return filenames[int(command) - 1]", "def core_document_parser(session, filename, options):\n vp = CoreVersionParser(filename, options)\n if 'dom' not in options:\n options['dom'] = vp.dom\n if vp.version == '0.0':\n doc = CoreDocumentParser0(session, filename, options)\n elif vp.version == '1.0':\n doc = CoreDocumentParser1(session, filename, options)\n else:\n raise ValueError('unsupported document version: %s' % vp.version)\n return doc", "def get_type(self):\n\n return ebooklib.ITEM_DOCUMENT", "def doc(obj):\n return Documentation.fromObject(obj).first", "def make_documents(f, index: str) -> typing.Iterator[dict]:\n\n while True:\n line = f.readline()\n if not line:\n break\n idx = int(line.strip())\n line = f.readline()\n doc = {\n '_index': index,\n '_type': \"_doc\",\n '_source': line.strip(),\n '_id': idx,\n }\n yield doc", "def createObject(self, *args):\n return _libsbml.CompSBMLDocumentPlugin_createObject(self, *args)", "def choose_documents(update, context):\n sc_api = SmartCAT(SMARTCAT_API_USERNAME, SMARTCAT_API_PASSWORD)\n names_or_ids = update.message.text.lower().strip()\n documents = []\n response = sc_api.project.get(SMARTCAT_PROJECT_ID)\n if response.status_code != 200:\n update.message.reply_text(SHIT_HAPPENS + \"\\nВведи названия глав еще раз или /cancel для выхода\")\n return constants.STATE_CHOOSE_DOCUMENT\n\n project_data = json.loads(response.content.decode('utf-8'))\n if not project_data:\n update.message.reply_text(SHIT_HAPPENS + \"\\nВведи названия глав еще раз или /cancel для выхода\")\n return constants.STATE_CHOOSE_DOCUMENT\n\n 
logger.info('names_or_ids = {}'.format(names_or_ids))\n if names_or_ids == ALL_CHAPTERS.lower() or names_or_ids == ALL.lower():\n documents = project_data['documents']\n elif names_or_ids == ACTIVE_CHAPTERS.lower():\n for d in project_data['documents']:\n logger.info(\"{} - {}\".format(d['name'], get_document_stage(d)))\n if get_document_stage(d) < constants.CHAPTER_STATE_FINAL_EDITING:\n logger.info(\"Document added: {0} {1}\".format(d['id'], d['name']))\n documents.append(d)\n elif names_or_ids == CHAPTERS_BEING_TRANSLATED.lower():\n for d in project_data['documents']:\n if get_document_stage(d) == constants.CHAPTER_STATE_TRANSLATION:\n logger.info(\"Document added: {0} {1}\".format(d['id'], d['name']))\n documents.append(d)\n elif names_or_ids == CHAPTERS_BEING_EDITED.lower():\n for d in project_data['documents']:\n if get_document_stage(d) == constants.CHAPTER_STATE_EDITING:\n logger.info(\"Document added: {0} {1}\".format(d['id'], d['name']))\n documents.append(d)\n else:\n names_or_ids = [s.lower().strip() for s in names_or_ids.split(\"\\n\")]\n for d in project_data['documents']:\n if d['id'].lower() in names_or_ids or d['name'].lower().strip() in names_or_ids:\n logger.info(\"Document added: {0} {1}\".format(d['id'], d['name']))\n documents.append(d)\n\n if len(documents) == 0:\n update.message.reply_text(NOTHING_FOUND + \"\\nВведи названия глав еще раз или /cancel для выхода\")\n return constants.STATE_CHOOSE_DOCUMENT\n\n context.user_data['documents'] = documents\n\n if len(documents) == 1:\n reply = \"Ок, я нашел одну главу: {0} ({1})\".format(documents[0]['name'], documents[0]['id'])\n else:\n reply = \"Ок, я нашел {0} глав.\\n\".format(len(documents))\n for d in documents:\n stage_name = constants.CHAPTER_STAGE_NAMES[get_document_stage(d)]\n reply += \"- {0}: {1}\\n\".format(stage_name, d['name'])\n\n return reply", "def create(self, request, *args, **kwargs):\n logger.debug(u'DocumentDefinition.create ...')\n logger.debug(u'DocumentDefinition.create :: REQUEST: {}'.format(request.REQUEST))\n version = request.version\n if '@' in version:\n branch_name, tag_name = version.split('@')\n else:\n tag_name = version\n branch_name = None\n logger.debug(u'DocumentDefinition.create :: tag: {}'.format(tag_name))\n now_es = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n if len(kwargs) == 0:\n raise exceptions.XimpiaAPIException(_(u'No document type sent'))\n doc_type = kwargs['doc_type']\n logger.debug(u'DocumentDefinition.create :: doc_type: {}'.format(doc_type))\n # resolve index based on request host for site\n site_slug = get_site(request)\n index = '{}__base'.format(site_slug)\n logger.debug(u'DocumentDefinition.create :: index: {}'.format(index))\n ###############\n # validations\n ###############\n # check user request and user is admin\n if not request.user or (request.user and not request.user.id):\n raise exceptions.XimpiaAPIException(_(u'User needs to be authenticated'))\n user = request.user\n logger.debug(u'DocumentDefinition.create :: request.user: {}'.format(user))\n groups = user.document['groups']\n logger.debug(u'DocumentDefinition.create :: groups: {}'.format(groups))\n admin_groups = filter(lambda x: x['name'] == 'admin', groups)\n if not admin_groups:\n raise exceptions.XimpiaAPIException(_(u'User needs to be admin'))\n # generate mappings\n doc_def = DocumentDefinition(json.loads(request.body), doc_type, user, tag_name=tag_name,\n branch_name=branch_name)\n document_definition_input = doc_def.logical\n logger.info(u'DocumentDefinition.create :: 
document_definition_input: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(document_definition_input)))\n bulk_queries = list()\n # Check db validations: tag exists, document definition not exists, no fields\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'document-definition'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'document-definition__doc_type__v1.raw__v1': doc_type\n }\n }\n }\n )\n )\n )\n # meta_data = document_definition_input['_meta']\n # Check mapping does not exist\n es_response_raw = requests.get(\n '{host}/{index}/_mapping/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=index,\n doc_type=doc_type\n )\n )\n existing_mapping = es_response_raw.json()\n if existing_mapping:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists :: {}'.format(\n existing_mapping\n )))\n # Check no fields for doc type\n logger.debug(u'DocumentDefinition.create :: mapping in ES: {}'.format(es_response_raw.content))\n\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'field-version'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'field-version__doc_type__v1.raw__v1': doc_type\n }\n }\n }\n )\n )\n )\n # Validate tag exists\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'tag'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'tag__slug__v1.raw__v1': slugify(tag_name)\n }\n }\n }\n )\n )\n )\n # print ''.join(map(lambda x: '{}\\n'.format(x[0]) + '{}\\n'.format(x[1]), bulk_queries))\n es_response_raw = requests.get(\n '{host}/_msearch'.format(\n host=settings.ELASTIC_SEARCH_HOST\n ),\n data=''.join(map(lambda x: '{}\\n'.format(x[0]) + '{}\\n'.format(x[1]), bulk_queries))\n )\n es_response = es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response validations: {}'.format(\n es_response\n ))\n responses = es_response.get('responses', [])\n if responses[0]['hits']['total'] > 0:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists'))\n if responses[1]['hits']['total'] > 0:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists'))\n if responses[2]['hits']['total'] == 0:\n raise exceptions.XimpiaAPIException(_(u'Tag does not exist'))\n ##################\n # End validations\n ##################\n\n # Build data\n doc_mapping = doc_def.get_mappings()\n fields_version_str = doc_def.get_field_versions(index, user)\n # Create document definition document\n physical = doc_def.get_physical()\n logger.debug(u'_create_index :: document definition: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(physical))\n )\n es_response_raw = requests.post(\n '{host}/{index}/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=u'{}__document-definition'.format(index),\n doc_type='document-definition'\n ),\n data=json.dumps(\n physical\n )\n )\n es_response = es_response_raw.json()\n document_created = es_response\n logger.info(u'DocumentDefinition.create :: response create document definition: {}'.format(\n es_response\n ))\n if 'error' in es_response and es_response['error']:\n raise exceptions.XimpiaAPIException(u'Error creating document definition')\n # Bulk insert for all fields\n # print fields_version_str\n es_response_raw = requests.post(\n '{host}/_bulk'.format(host=settings.ELASTIC_SEARCH_HOST),\n data=fields_version_str,\n headers={'Content-Type': 'application/octet-stream'},\n )\n es_response = 
es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response create field versions: {}'.format(\n es_response\n ))\n if 'errors' in es_response and es_response['errors']:\n raise exceptions.XimpiaAPIException(u'Error creating fields')\n # Create mapping\n logger.debug(u'DocumentDefinition.create :: mappings: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(doc_mapping)\n ))\n es_response_raw = requests.put(\n '{host}/{index}/_mapping/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=index,\n doc_type=doc_type\n ),\n data=json.dumps(doc_mapping)\n )\n es_response = es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response put mapping: {}'.format(es_response))\n if 'error' in es_response and es_response['error']:\n raise exceptions.XimpiaAPIException(u'Error in saving mappings')\n # output document\n output_document = json.loads(request.body)\n output_document['_id'] = document_created['_id']\n output_document['_version'] = document_created['_version']\n return Response(output_document)", "def getFile(self):\n #try to redetect the filetype\n vim.command(\"filetype detect\")\n #return the filetype\n filetype = vim.eval(\"&ft\")\n #filetype = vim.command(\"&ft\")\n if filetype:\n for file in self.template_files:\n if filetype.lower() in file.lower():\n self.hasTemplate = True\n return open(self.template_folder + \"/\" + file, 'r')\n return None", "def parse_docs(filename):\n \n # open word doc\n word = win32.gencache.EnsureDispatch('Word.Application')\n doc = word.Documents.Open(os.getcwd() + '/' + filename + \".doc\")\n doc.Activate()\n \n # read word doc as list of lists\n data = [doc.Tables(i).Range.Text for i in range(1,5)]\n data = ''.join(data)\n data = data.replace('\\r\\x07\\r\\x07', ', ')\n data = data.replace('\\r\\x07', ', ')\n data = data.split(\", \")\n \n # separate columns into lists\n varname = data[0::4]\n description = data[1::4]\n valuelineref = data[2::4]\n type = data[3::4]\n\n # create pandas dataframe and clean up\n df = pd.DataFrame(list(zip(varname, description, valuelineref, type)))\n doc.Close(True) # is this a function?\n headers = df.iloc[0]\n df = df[1:]\n df.columns = headers\n df['Variable Name'] = df['Variable Name'].str.replace('\\r','')\n \n # store as csv\n df.to_csv(filename + '.csv', index = False)\n return df", "def choose_file(self):\n pass", "def getDocuments(self):\n return self.objectValues('Multifile')", "def test_document_retrieval(self):", "def getdoc(doctype, name, user=None):\n\n\timport webnotes\n\t\n\tif not (doctype and name):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tif not name: \n\t\tname = doctype\n\n\tif not webnotes.conn.exists(doctype, name):\n\t\treturn []\n\n\ttry:\n\t\tbean = webnotes.bean(doctype, name)\n\t\tbean.run_method(\"onload\")\n\n\t\tdoclist = bean.doclist\n\n\t\t# add file list\n\t\tset_docinfo(doctype, name)\n\t\t\n\texcept Exception, e:\n\t\twebnotes.errprint(webnotes.utils.getTraceback())\n\t\twebnotes.msgprint('Did not load.')\n\t\traise e\n\n\tif bean and not name.startswith('_'):\n\t\twebnotes.user.update_recent(doctype, name)\n\t\n\twebnotes.response['docs'] = doclist", "def __init__(self, mimetype):\n self.mimetype = mimetype\n self.name = \"Filters.document.mime_type('{}')\".format(self.mimetype)", "def document(cls):\n header = cls.document_header()\n content = cls.document_content()\n index = cls.document_index()\n package = cls.package().name\n filename = inspect.getsourcefile(cls)\n try:\n lines = inspect.getsourcelines(cls)\n except 
IOError:\n lines = ([], 0)\n line = lines[1]+1\n return DocEntry(\n header, content, index=index,\n package=package, filename=filename, line=line)", "def main(rc):\n with store_client(rc) as sclient:\n for doc in rc.documents:\n sclient.copydoc(doc)", "def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()", "def get_docx_document(docx_file: str) -> docx.Document:\n if os.path.isfile(docx_file):\n return docx.Document(docx_file)\n else:\n logging.error(\"Could not find file at: \" + str(docx_file))\n return docx.Document()", "def get_document(self, value, key='name'):\n if value.endswith('.json'):\n key = 'filename'\n return [x for x in self.vocab if x[key] == value][0]", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def documento(self):\n return self.persona.documento", "def parse(self, fileName):\n from lxml import etree\n \n schemadoc = etree.parse(StringIO(\"\"\"\\\n<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <!-- the runscript -->\n <xs:complexType name=\"runscriptType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"machine\" type=\"machineType\"/>\n <xs:element name=\"system\" type=\"systemType\">\n <!-- setting keys have to be unique per system/version-->\n <!-- unfortunately i have found no way to create a link between settings and systems -->\n <!-- schematron should be able to do this but the lxml implementation seems to be incomplete-->\n <xs:unique name=\"settingKey\">\n <xs:selector xpath=\"setting\"/>\n <xs:field xpath=\"@name\"/>\n </xs:unique>\n </xs:element>\n <xs:element name=\"config\" type=\"configType\"/>\n <xs:element name=\"benchmark\" type=\"benchmarkType\"/>\n <xs:element name=\"pbsjob\" type=\"pbsjobType\"/>\n <xs:element name=\"condorjob\" type=\"condorjobType\"/>\n <xs:element name=\"seqjob\" type=\"seqjobType\"/>\n <xs:element name=\"project\" type=\"projectType\"/>\n </xs:choice>\n <xs:attribute name=\"output\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a project -->\n <xs:complexType name=\"projectType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"runspec\" type=\"runspecType\"/>\n <xs:element name=\"runtag\" type=\"runtagType\"/>\n </xs:choice>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"job\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a machine -->\n <xs:complexType name=\"machineType\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"cpu\" type=\"xs:token\" use=\"required\"/>\n <xs:attribute name=\"memory\" type=\"xs:token\" use=\"required\"/>\n </xs:complexType>\n\n <!-- a system -->\n <xs:complexType name=\"systemType\">\n <xs:choice minOccurs=\"1\" maxOccurs=\"unbounded\">\n <xs:element name=\"setting\">\n <xs:complexType>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"tag\">\n <xs:simpleType>\n <xs:list itemType=\"nameType\"/>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"ppn\" type=\"xs:positiveInteger\"/>\n <xs:attribute name=\"procs\">\n <xs:simpleType>\n 
<xs:list itemType=\"xs:integer\"/>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"pbstemplate\" type=\"xs:string\"/>\n <xs:anyAttribute processContents=\"lax\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"version\" type=\"versionType\" use=\"required\"/>\n <xs:attribute name=\"measures\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"config\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n\n <!-- generic attributes for jobs-->\n <xs:attributeGroup name=\"jobAttr\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"timeout\" type=\"timeType\" use=\"required\"/>\n <xs:attribute name=\"runs\" type=\"xs:positiveInteger\" use=\"required\"/>\n <xs:anyAttribute processContents=\"lax\"/>\n </xs:attributeGroup>\n \n <!-- a seqjob -->\n <xs:complexType name=\"seqjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"parallel\" type=\"xs:positiveInteger\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a pbsjob -->\n <xs:complexType name=\"pbsjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"script_mode\" use=\"required\">\n <xs:simpleType>\n <xs:restriction base=\"xs:string\">\n <xs:enumeration value=\"single\"/>\n <xs:enumeration value=\"timeout\"/>\n <xs:enumeration value=\"memout\"/>\n </xs:restriction>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"walltime\" type=\"timeType\" use=\"required\"/>\n </xs:complexType>\n\n <!-- a condorjob -->\n <xs:complexType name=\"condorjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"script_mode\" use=\"required\">\n <xs:simpleType>\n <xs:restriction base=\"xs:string\">\n <xs:enumeration value=\"single\"/>\n <xs:enumeration value=\"timeout\"/>\n <xs:enumeration value=\"memout\"/>\n </xs:restriction>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"walltime\" type=\"timeType\" use=\"required\"/>\n <xs:attribute name=\"condortemplate\" type=\"xs:string\" use=\"required\"/>\n <xs:attribute name=\"basedir\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n\n\n <!-- a config -->\n <xs:complexType name=\"configType\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"template\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a benchmark -->\n <xs:complexType name=\"benchmarkType\">\n <xs:sequence minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:choice>\n <xs:element name=\"files\">\n <xs:complexType>\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"add\">\n <xs:complexType>\n <xs:attribute name=\"file\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n <xs:attribute name=\"path\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"folder\">\n <xs:complexType>\n <xs:sequence minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"ignore\">\n <xs:complexType>\n <xs:attribute name=\"prefix\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n <xs:attribute name=\"path\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n </xs:sequence>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- common attributes for runspec/runtag -->\n <xs:attributeGroup name=\"runAttr\">\n <xs:attribute name=\"machine\" 
type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"benchmark\" type=\"nameType\" use=\"required\"/>\n </xs:attributeGroup>\n \n <!-- a runspec -->\n <xs:complexType name=\"runspecType\">\n <xs:attribute name=\"system\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"version\" type=\"versionType\" use=\"required\"/>\n <xs:attribute name=\"setting\" type=\"nameType\" use=\"required\"/>\n <xs:attributeGroup ref=\"runAttr\"/>\n </xs:complexType>\n \n <!-- a runtag -->\n <xs:complexType name=\"runtagType\">\n <xs:attributeGroup ref=\"runAttr\"/>\n <xs:attribute name=\"tag\" type=\"tagrefType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- simple types used througout the above definitions -->\n <xs:simpleType name=\"versionType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[0-9a-zA-Z._-]+\"/>\n </xs:restriction>\n </xs:simpleType>\n\n <xs:simpleType name=\"timeType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[0-9]+(:[0-9]+(:[0-9]+)?)?\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <xs:simpleType name=\"tagrefType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"(\\*all\\*)|([A-Za-z_\\-0-9]+([ ]*[A-Za-z_\\-0-9]+)*)([ ]*\\|[ ]*([A-Za-z_\\-0-9]+([ ]*[A-Za-z_\\-0-9]+)*))*\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <xs:simpleType name=\"nameType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[A-Za-z_\\-0-9]*\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <!-- the root element -->\n <xs:element name=\"runscript\" type=\"runscriptType\">\n <!-- machine keys -->\n <xs:keyref name=\"machineRef\" refer=\"machineKey\">\n <xs:selector xpath=\"project/runspec|project/runall\"/>\n <xs:field xpath=\"@machine\"/>\n </xs:keyref>\n <xs:key name=\"machineKey\">\n <xs:selector xpath=\"machine\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- benchmark keys -->\n <xs:keyref name=\"benchmarkRef\" refer=\"benchmarkKey\">\n <xs:selector xpath=\"project/runspec|project/runall\"/>\n <xs:field xpath=\"@benchmark\"/>\n </xs:keyref>\n <xs:key name=\"benchmarkKey\">\n <xs:selector xpath=\"benchmark\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- system keys -->\n <xs:keyref name=\"systemRef\" refer=\"systemKey\">\n <xs:selector xpath=\"project/runspec\"/>\n <xs:field xpath=\"@system\"/>\n <xs:field xpath=\"@version\"/>\n </xs:keyref>\n <xs:key name=\"systemKey\">\n <xs:selector xpath=\"system\"/>\n <xs:field xpath=\"@name\"/>\n <xs:field xpath=\"@version\"/>\n </xs:key>\n <!-- config keys -->\n <xs:keyref name=\"configRef\" refer=\"configKey\">\n <xs:selector xpath=\"system\"/>\n <xs:field xpath=\"@config\"/>\n </xs:keyref>\n <xs:key name=\"configKey\">\n <xs:selector xpath=\"config\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- config keys -->\n <xs:keyref name=\"jobRef\" refer=\"jobKey\">\n <xs:selector xpath=\"project\"/>\n <xs:field xpath=\"@job\"/>\n </xs:keyref>\n <xs:key name=\"jobKey\">\n <xs:selector xpath=\"seqjob|pbsjob|condorjob\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- project keys -->\n <xs:unique name=\"projectKey\">\n <xs:selector xpath=\"project\"/>\n <xs:field xpath=\"@name\"/>\n </xs:unique>\n </xs:element>\n</xs:schema>\n\"\"\"))\n schema = etree.XMLSchema(schemadoc)\n\n doc = etree.parse(open(fileName))\n schema.assertValid(doc)\n \n root = doc.getroot()\n run = Runscript(root.get(\"output\"))\n\n for node in root.xpath(\"./pbsjob\"):\n attr = self._filterAttr(node, [\"name\", \"memout\", \"timeout\", \"runs\", \"ppn\", \"procs\", \"script_mode\", \"walltime\"])\n job = 
PbsJob(node.get(\"name\"), node.get(\"memout\"), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), node.get(\"script_mode\"), tools.xmlTime(node.get(\"walltime\")), attr)\n run.addJob(job)\n\n for node in root.xpath(\"./condorjob\"):\n attr = self._filterAttr(node, [\"name\", \"memout\", \"timeout\", \"runs\", \"ppn\", \"procs\", \"script_mode\", \"walltime\"])\n job = CondorJob(node.get(\"name\"), tools.xmlTime(node.get(\"memout\")), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), node.get(\"script_mode\"), tools.xmlTime(node.get(\"walltime\")), node.get(\"condortemplate\"),node.get(\"basedir\"), attr)\n run.addJob(job)\n\n for node in root.xpath(\"./seqjob\"):\n attr = self._filterAttr(node, [\"name\", \"timeout\", \"runs\", \"parallel\"])\n job = SeqJob(node.get(\"name\"), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), int(node.get(\"parallel\")), attr)\n run.addJob(job)\n \n for node in root.xpath(\"./machine\"):\n machine = Machine(node.get(\"name\"), node.get(\"cpu\"), node.get(\"memory\"))\n run.addMachine(machine)\n\n for node in root.xpath(\"./config\"):\n config = Config(node.get(\"name\"), node.get(\"template\"))\n run.addConfig(config)\n \n compoundSettings = {}\n sytemOrder = 0 \n for node in root.xpath(\"./system\"):\n system = System(node.get(\"name\"), node.get(\"version\"), node.get(\"measures\"), sytemOrder)\n settingOrder = 0\n for child in node.xpath(\"setting\"):\n attr = self._filterAttr(child, [\"name\", \"cmdline\", \"tag\"])\n compoundSettings[child.get(\"name\")] = []\n if \"procs\" in attr:\n procs = [int(proc) for proc in attr[\"procs\"].split(None)]\n del attr[\"procs\"]\n else: procs = [None]\n if \"ppn\" in attr: \n ppn = int(attr[\"ppn\"])\n del attr[\"ppn\"]\n else: ppn = None\n if \"pbstemplate\" in attr:\n pbstemplate = attr[\"pbstemplate\"]\n del attr[\"pbstemplate\"]\n else: pbstemplate = None\n if child.get(\"tag\") == None: tag = set()\n else: tag = set(child.get(\"tag\").split(None))\n for num in procs:\n name = child.get(\"name\")\n if num != None: \n name += \"-n{0}\".format(num)\n compoundSettings[child.get(\"name\")].append(name)\n setting = Setting(name, child.get(\"cmdline\"), tag, settingOrder, num, ppn, pbstemplate, attr)\n system.addSetting(setting)\n settingOrder += 1\n\n run.addSystem(system, node.get(\"config\"))\n sytemOrder += 1\n \n for node in root.xpath(\"./benchmark\"):\n benchmark = Benchmark(node.get(\"name\"))\n for child in node.xpath(\"./folder\"):\n element = Benchmark.Folder(child.get(\"path\"))\n for grandchild in child.xpath(\"./ignore\"):\n element.addIgnore(grandchild.get(\"prefix\"))\n benchmark.addElement(element)\n for child in node.xpath(\"./files\"):\n element = Benchmark.Files(child.get(\"path\"))\n for grandchild in child.xpath(\"./add\"):\n element.addFile(grandchild.get(\"file\"))\n benchmark.addElement(element)\n run.addBenchmark(benchmark)\n \n for node in root.xpath(\"./project\"):\n project = Project(node.get(\"name\"))\n run.addProject(project, node.get(\"job\"))\n for child in node.xpath(\"./runspec\"):\n for setting in compoundSettings[child.get(\"setting\")]: \n project.addRunspec(child.get(\"machine\"),\n child.get(\"system\"),\n child.get(\"version\"),\n setting,\n child.get(\"benchmark\"))\n \n for child in node.xpath(\"./runtag\"):\n project.addRuntag(child.get(\"machine\"), \n child.get(\"benchmark\"),\n child.get(\"tag\"))\n \n return run" ]
[ "0.5883688", "0.5880611", "0.58433646", "0.58253616", "0.57991326", "0.57213825", "0.5719066", "0.5658872", "0.56124973", "0.5600502", "0.5573449", "0.55684054", "0.55032", "0.5484727", "0.54810476", "0.5461333", "0.5415002", "0.54017013", "0.53869313", "0.538439", "0.5352028", "0.53190356", "0.5317983", "0.5281265", "0.5280905", "0.52735674", "0.52566975", "0.524935", "0.5242105", "0.52274734", "0.52097666", "0.5192089", "0.51710093", "0.5128564", "0.5128356", "0.5127242", "0.5125626", "0.51238346", "0.51201075", "0.5115225", "0.51137644", "0.510278", "0.51026505", "0.51016027", "0.50893193", "0.50871444", "0.50657684", "0.50599724", "0.5059082", "0.50532293", "0.5042078", "0.50399846", "0.5028035", "0.5014771", "0.50043434", "0.4998458", "0.4994553", "0.49775258", "0.4973609", "0.49676636", "0.49671495", "0.49652982", "0.49602896", "0.4942609", "0.4938281", "0.49375278", "0.4933115", "0.49194357", "0.49184906", "0.49118227", "0.49033704", "0.48995966", "0.4897438", "0.4892971", "0.4880239", "0.48781925", "0.48780826", "0.48737568", "0.48660868", "0.4861025", "0.48599818", "0.48549208", "0.4854217", "0.48469105", "0.48452085", "0.48444498", "0.48274416", "0.48261738", "0.48241818", "0.48237473", "0.48195863", "0.48168874", "0.4813349", "0.48093414", "0.48006243", "0.4796654", "0.47944906", "0.47915974", "0.47904313", "0.47878537", "0.4784552" ]
0.0
-1
Creates a variable for the chosen folder
def entry_set_folder(self, entry):
    global folder_name
    folder_name = filedialog.askdirectory()
    entry.delete(0, 'end')
    entry.insert(tk.END, folder_name)
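A minimal runnable sketch of how this handler is typically wired up in a tkinter form. Only entry_set_folder itself comes from the retrieved document; the FolderPicker wrapper, the Browse button, and the module-level folder_name default are assumptions added for illustration:

import tkinter as tk
from tkinter import filedialog

folder_name = None  # module-level variable the handler assigns via `global`

class FolderPicker:
    def __init__(self, root):
        # Hypothetical layout: an entry showing the picked directory plus a browse button.
        self.entry = tk.Entry(root, width=40)
        self.entry.pack(side=tk.LEFT)
        tk.Button(root, text="Browse...",
                  command=lambda: self.entry_set_folder(self.entry)).pack(side=tk.LEFT)

    def entry_set_folder(self, entry):
        global folder_name
        folder_name = filedialog.askdirectory()
        entry.delete(0, 'end')
        entry.insert(tk.END, folder_name)

if __name__ == '__main__':
    root = tk.Tk()
    FolderPicker(root)
    root.mainloop()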
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_folder():\n return input(\"Folder: \")", "def create_folder(self):\n cur_dir=os.getcwd()\n unique=False\n dirlist= [item for item in os.listdir(cur_dir) if os.path.isdir(os.path.join(cur_dir,item))]\n folder_name='taxonomy_{}_{}'.format(self.place,self.year)\n j=1\n while not unique:\n if folder_name in dirlist:\n folder_name='taxonomy_{}_{}({})'.format(self.place,self.year,str(j))\n j+=1\n else:\n unique=True\n new_folder=os.path.join(cur_dir,folder_name)\n os.mkdir(new_folder)\n os.chdir(new_folder)\n return folder_name", "def createVariantDirs(shotFolder, varType, name, *args):\n #do this again (like above) just to make sure we don't repeat a name if we're calling externally\n cFuncs.createVariantDirectories(shotFolder, varType, name)\n cmds.warning(\"Created variant: {0} in {1}\".format(name, cFuncs.fixPath(os.path.join(shotFolder, varType))))\n if cmds.window(\"createVarWin\", exists=True):\n cmds.deleteUI(\"createVarWin\")\n\n #refresh the shot win\n if cmds.window(\"shotWin\", exists=True):\n #pth = utils.PathManager(shotFolder)\n import chrlx_pipe.shotWin as shotWin\n shotWin.populateWindow()", "def createWinVarDirs(shotFolder, varType, *args):\n\n name = cmds.textFieldGrp(vwidgets[\"name\"], q=True, tx=True)\n\n varFold = cFuncs.fixPath(os.path.join(shotFolder, varType))\n variants = cFuncs.getShotVariantList(varFold)\n\n if variants and (name in variants):\n cmds.confirmDialog(t=\"Name Exists!\", m = \"There is already a shot of this name\\nin this spot! Please enter another.\")\n return()\n\n createVariantDirs(shotFolder, varType, name)\n if cmds.window(\"createVarWin\", exists=True):\n cmds.delete(\"createVarWin\")\n\n # refresh the shot win\n if cmds.window(\"shotWin\", exists=True):\n #pth = utils.PathManager(shotFolder)\n import chrlx_pipe.shotWin as shotWin\n shotWin.populateWindow()", "def getCreate(newFoldername):\r\n current_directory = os.getcwd()\r\n new_directory = os.path.join(current_directory,newFoldername)\r\n \r\n if os.path.exists(new_directory):\r\n return new_directory", "def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")", "def prepare_folder(self) -> str:\n base_folder = self.config['info']['folder']\n today_folder = f'{datetime.today():%Y-%m-%d}'\n folder = os.path.join(base_folder, today_folder)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n return folder", "def createWinShotDirs(shotFolder, *args):\n\n num = cmds.textFieldGrp(swidgets[\"num\"], q=True, tx=True)\n myChars = [int(s) for s in num if s.isdigit()] # get list of int digits in num\n\n if len(myChars) !=3 : # if we don't have 3 digits. . .\n cmds.warning(\"You need to enter a 3 digit number for the shot!!\")\n return() \n\n shotType = cmds.radioButtonGrp(swidgets[\"version\"], q=True, sl=True)\n if shotType == 1:\n sname = \"shot\"\n if shotType == 2:\n sname = \"previs\"\n\n name = \"{0}{1}\".format(sname, num)\n\n #here we compare that list of assets with our proposed name\n shots = cFuncs.getSpotShotList(shotFolder)\n if name in shots:\n cmds.confirmDialog(t=\"Name Exists!\", m = \"There is already a shot of this name\\nin this project! Please enter another.\")\n return()\n \n shotFolderObj=utils.PathManager(shotFolder)\n#---------------- restore this!! 
\n # jobDirectoryCreator.createShot(shotFolderObj.jobDirname, shotFolderObj.spotDirname, name)\n\n varName = cmds.textFieldGrp(swidgets[\"variant\"], q=True, tx=True)\n\n thisShotFolder = cFuncs.fixPath(os.path.join(shotFolder, name))\n createVariantDirs(thisShotFolder, \"anm\", varName, *args)\n\n if cmds.window(\"createShotWin\", exists = True):\n cmds.deleteUI(\"createShotWin\")\n\n # refresh the shot win\n if cmds.window(\"shotWin\", exists=True):\n #pth = utils.PathManager(shotFolder)\n import chrlx_pipe.shotWin as shotWin\n shotWin.populateWindow()", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def subFolder(self, value):\r\n self.__folder = str(value)", "def gen_folder(base, e, f, a, k):\n \n return base + 'eps_' + str(e) + '_fp_' + str(f) + \\\n '_areak_' + str(a) + '_kappa_' + str(k)", "def create_summary_folder_name():\n dir_str = '' # ADD PATH\n time_stamp_str = time.strftime(\"%a, %d %b %Y %H:%M:%S/\", time.gmtime())\n param_str = ''\n return dir_str + time_stamp_str + param_str", "def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass", "def _create_folder(self, unsupported_file: File) -> str:\n if not self.possibilities:\n print(\n f\"----\\nNo folders found in directory. Please enter directory name for \"\n f\"{unsupported_file} file:\\n\"\n )\n else:\n print(\"Please enter directory name:\")\n\n while True:\n folder_name = input()\n checker = [True if char.isalnum() else False for char in folder_name]\n if False not in checker and folder_name not in self.possibilities.keys():\n os.makedirs(folder_name)\n temp_folder = Folder(folder_name)\n self.folders.append(temp_folder)\n if unsupported_file.get_extension():\n temp_folder.files.append(PlaceHolderFile(unsupported_file.name))\n return folder_name\n else:\n print(\"Invalid input\")", "def pickAFolder():\n folder = _tkCall(tkFileDialog.askdirectory)\n if folder == '':\n folder = myro.globvars.mediaFolder\n return folder", "def create_dir(dir_path,plot_type):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUZWXYZ\"\n alphabet += alphabet.lower()\n alphabet += \"01234567890\"\n\n\n if dir_path==None or dir_path=='':\n dir_path=''\n random_dir_name=''.join([choice(alphabet) for i in range(10)])\n dir_path ='./'+plot_type+strftime(\"%Y_%m_%d_%H_%M_%S\")+random_dir_name+'/'\n\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n return dir_path", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "async def new_folder(name):\n res = await joplin.create_folder(folder=name)\n return res.json()['id']", "def populateOutputFolder(self):\n filePath = pm.fileDialog2(fileMode=2,\n startingDirectory=self.startDir,\n fileFilter=' Shifter Game Assembly folder')\n if not filePath:\n return\n if not isinstance(filePath, string_types):\n filePath = filePath[0]\n self.gtUIInst.path_lineEdit.setText(filePath)", "def createFolder(self):\n\n self.directory = \"D:\\\\CompositionHelper\"\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n print 
('Created new folder')", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def set_folder(tipo):\n loc = path.join(pardir, 'SHOTS')\n makedirs(loc, exist_ok=True)\n # 'data_type' is the subfolder, depending on data type. default is a shot.\n data_type = 'shots'\n if tipo == 'mirror':\n data_type = 'tests'\n elif 'test' in tipo:\n data_type = 'tests'\n elif tipo == 'clean':\n data_type = 'cleaning_plasma'\n folder = path.join(loc, data_type)\n makedirs(folder, exist_ok=True)\n return path.join(loc, data_type)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get path name\n pathname = tk.filedialog.asksaveasfilename(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get path name\n pathname = tk.filedialog.asksaveasfilename(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def initializeFolder(savePath,name,bOverWrite):\n if savePath==None:\n savePath=os.path.join('.','results')\n path=os.path.join(savePath,name)\n path_copy=deepcopy(path)\n if not bOverWrite:\n if os.path.exists(path_copy):\n fi=2\n poss_path=os.path.join(path_copy,'Run (%i)'%fi)\n while os.path.exists(poss_path):\n fi+=1\n poss_path=os.path.join(path_copy,'Run (%i)'%fi)\n path=poss_path\n \n os.makedirs(path)\n \n bexisted=True\n if not os.path.exists(path):\n os.makedirs(path)\n bexisted=False\n \n if path[-1]!=os.path.sep:\n path+=os.path.sep\n \n print \"Results will be saved in %s folder '%s'\"%('existing' if bexisted else 'new',path) \n return path", "def createFolder(self):\n raise NotImplementedError", "def __create_folder(self, stamp_unique=True):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among %r' % self.params.keys())\n # create results folder for experiments\n path_exp = create_experiment_folder(\n self.params.get('path_out'), self.__class__.__name__, self.params.get('name'), stamp_unique\n )\n self.params['path_exp'] = path_exp\n save_config_yaml(os.path.join(path_exp, self.NAME_CONFIG_YAML), self.params)", "def build(self):\n if self.project_folder_name:\n folder_id = self._project_folder_id()\n if self.sub_folder_name:\n folder_id = self._get_sub_folder_id(folder_id)\n\n if folder_id:\n file_id = create_file_in_drive(self.sheet_title, folder_id)\n else:\n file_id = create_file_in_drive(self.sheet_title)\n\n return file_id", "def create_experiment_folder(path_out, dir_name, name='', stamp_unique=True):\n assert os.path.exists(path_out), 'missing base folder \"%s\"' % path_out\n date = time.gmtime()\n if isinstance(name, str) and name:\n dir_name = '{}_{}'.format(dir_name, name)\n path_exp = os.path.join(path_out, dir_name)\n if stamp_unique:\n path_exp += '_' + time.strftime(FORMAT_DATE_TIME, date)\n 
path_created = None\n while not path_created:\n logging.warning('particular out folder already exists')\n if path_created is not None:\n path_exp += '-' + str(np.random.randint(0, 100))\n path_created = create_folder(path_exp, ok_existing=False)\n else:\n path_created = create_folder(path_exp, ok_existing=False)\n logging.info('created experiment folder \"%r\"', path_created)\n return path_exp", "def __init__(self, folder: str):\n self.folder = folder", "def createVariant(fType, *args):\n # pass the shotFolder and fType to chrlx_pipe.createDirectories.createVariant(shotFolder, fType)\n shot = cmds.textScrollList(widgets[\"shotListTSL\"], q=True, si=True)\n if shot:\n createDir.createVariant(pi.currentShotFolder, fType)\n else:\n cmds.warning(\"You need to select a shot in which to create a new variant!\")", "def _gen_folder_(self):\n os.makedirs(self.fld_name)\n dic_json = {\"PARAM_EXCOND\": self.cond_ex,\n \"PARAM_CALCOND\": self.cond_cal,\n \"PARAM_MODELCONST\": self.const_model\n }\n with open(os.path.join(self.fld_name, \"cond.json\"), \"w\") as f:\n json.dump(dic_json, f, ensure_ascii=False, indent=4)", "def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory", "def create_folder(self, c_path):\n raise NotImplementedError", "def make_path(self):\n folders = [\n f\"{self.save_path}{self.name}/json/\",\n f\"{self.save_path}{self.name}/images/\",\n ]\n if hasattr(self, \"masks\"):\n folders.append(f\"{self.save_path}{self.name}/masks/\")\n for folder in folders:\n if not os.path.exists(folder):\n os.makedirs(folder)", "def getDir(self):\n self.folder = filedialog.askdirectory()\n self.dir_lbl[\"text\"] = self.folder", "def mkdir(path):", "def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get pathname\n pathname = tk.filedialog.askdirectory(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def create_folder(folder_path: List[str]) -> str:\n drive = _drive_gen()\n return _create_or_find_folder(folder_path, drive)", "def folder_runnum():\n now = datetime.datetime.now()\n runnum = 1\n while True:\n folder_name = f\"astroNN_{now.month:0{2}d}{now.day:0{2}d}_run{runnum:0{3}d}\"\n if not os.path.exists(folder_name):\n break\n else:\n runnum += 1\n\n return folder_name", "def make_vars(self):\n here=self.PWD\n PWD_UP1=os.path.dirname(here)\n PWD_UP2=os.path.dirname(PWD_UP1)\n PWD_UP3=os.path.dirname(PWD_UP2)\n PWD_UP4=os.path.dirname(PWD_UP3)\n PWD_UP5=os.path.dirname(PWD_UP4)\n return { 'PWD_UP1':PWD_UP1, 'PWD_UP2':PWD_UP2,\n 'PWD_UP3':PWD_UP3, 'PWD_UP4':PWD_UP4,\n 'PWD_UP5':PWD_UP5, 'OUTPUT_PATH':self.outloc,\n 'PWD': here }", "def create_folder(self, foldername: str) -> int:\n raise NotImplementedError", "def selectFolder(): \r\n directory = filedialog.askdirectory(\r\n title='Select file'\r\n )\r\n return directory", "def prep_folder(args):\n 
if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)", "def path_of_image():\n top.folder_name = filedialog.askdirectory(title=\"select directory\",\n initialdir=\"C:/Users/Ayush sagore/JUPITER NOTEBOOK ML/CNN Model/\"\n \"test_dataset/\")\n path_name.insert(0, top.folder_name)", "def open_folder(self, event):\n if self.advancedMenu:\n self.advancedMenu.Show(False)\n home = os.path.expanduser('~')\n c = config.Config()\n panda = None\n if c.username:\n # try for full path if there is a username\n panda = os.path.join(home, 'Digital Panda', c.username)\n if not os.path.exists(panda):\n # if the path doesn't exist - reset\n panda = None\n if not panda:\n # get base folder (without acccount)\n panda = os.path.join(home, 'Digital Panda')\n if not os.path.exists(panda):\n try:\n os.makedirs(panda)\n except:\n print \"TODO: need to handle folder creation failure!\"\n open_folder(panda)", "def gen_temp_wishlist(self, identifier: str):\n tempdir_name = f\"{identifier}_{randint(1000,9999)}_repo\"\n # for when run from wish repo's home\n basedir = Path(__file__).parent.resolve()\n newdir = basedir / tempdir_name\n shutil.copytree(Path(basedir/\"fixture_repo\"), newdir)\n return newdir", "def identify_folder(self, folder):", "def create_dir_structure():\n LOG.info('In create_dir_structure')\n OutputWrite.change_to_script_directory(__file__)\n path = os.path.abspath(os.path.join('..', 'results',\n global_constants.TEXT_BOARD,\n global_constants.TEXT_INTERFACE,\n global_constants.TEXT_DEVICE,\n global_constants.TEST_EXECUTION_NAME\n ))\n LOG.debug('Path to be Created = {0}'.format(path))\n os.makedirs(path, exist_ok=True, mode=0o755)\n for item in global_constants.TEST_CASE_LIST_NAMES:\n in_path = os.path.exists(os.path.join(path, item))\n if not os.path.exists(in_path):\n LOG.debug('Path with Test Case name = {0}'.format(in_path))\n os.mkdir(in_path)\n LOG.debug('Path = {0}'.format(path))\n return path", "def create_dir(dir_type, base_path):\n\n path = os.path.join(base_path, dir_type)\n if not os.path.exists(path):\n os.mkdir(path)\n print('Created directory {!r}'.format(path))\n else:\n print('Found directory {!r}'.format(path))\n\n\n if dir_type.find('figure') != -1:\n sc.settings.figdir = path\n scv.settings.figdir = path\n\n return path", "def dirCreate(newFoldername):\r\n current_directory = os.getcwd()\r\n new_directory = os.path.join(current_directory,newFoldername)\r\n \r\n if not os.path.exists(new_directory):\r\n os.makedirs(new_directory)\r\n return new_directory", "def create_new(self, root, name_length):\n self.name = create_random_string(name_length)\n self.ctime = datetime.datetime.now()\n date_time = datetime.datetime.strftime(self.ctime, \"%Y%m%d_%H%M%S\")\n self.folder = f\"{date_time}_{self.name}\"\n self.path = os.path.join(root, self.folder)\n try:\n os.makedirs(self.path)\n print(f\"Created folder {self.folder}\")\n except OSError:\n print(f\"Directory {self.folder} already exists\")\n except:\n print(f\"Cannot create folder: {self.folder}\")\n raise", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def createFolder(self):\n folderName, ok = QtWidgets.QInputDialog.getText(self, 'Folder Name', 'Enter the folder name :',\n QtWidgets.QLineEdit.Normal)\n\n if ok:\n parent = 
self.fileDir\n currentPath = self.dataDir\n if self.fileDir.selectedItems():\n parent = self.fileDir.selectedItems()[-1]\n currentPath = str(parent.toolTip(0))\n\n if not os.path.isdir('%s/%s' % (currentPath, str(folderName))):\n item = QtWidgets.QTreeWidgetItem(parent)\n\n item.setText(0, str(folderName))\n item.setToolTip(0, '%s/%s' % (currentPath, str(folderName)))\n\n # connect icon\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap('%s/folder.png' % (self.iconsDir)), QtGui.QIcon.Normal,\n QtGui.QIcon.Off)\n item.setIcon(0, icon)\n\n # be careful about shiboken2, you can use 'is' and 'is not' instead of using operator '==' and '!='\n if parent is not self.fileDir:\n self.fileDir.setItemExpanded(parent, True)\n self.fileDir.setItemSelected(parent, False)\n\n self.fileDir.setItemSelected(item, True)\n\n os.makedirs('%s/%s' % (currentPath, str(folderName)))", "def presentation(self, value):\r\n realpath = os.path.expanduser(value)\r\n if self.auto_create:\r\n if not os.path.exists(realpath):\r\n os.makedirs(realpath)\r\n return realpath", "def create(self, basedir, outdir, name, prefix=None):", "def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))", "def setUpTempVariables(folder):\n\tcwd = os.getcwd()\n\tcmd = \"set path=\" + folder + fileSeperator + \"bin;%\" + \"path%\"\n\tprint cmd\n\tos.system(cmd)", "def create_folder(self, req, folder_path, new_folder_name):\n\t\tdirectory_path = os.path.join(self.get_selected_root(req), folder_path)\n\t\t\n\t\t#prevent shenanigans\n\t\tnew_folder_name = new_folder_name.split('/').pop()\n\t\t\n\t\tnew_path = os.path.join(directory_path, new_folder_name)\n\t\tif(os.access(new_path, os.F_OK)):\n\t\t\tcontent = tags.Tag('Error')(number=FLD_EXISTS)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tos.mkdir(new_path)\n\t\t\t\tcontent = tags.Tag('Error')(number=SUCCESS)\n\t\t\texcept:\n\t\t\t\tcontent = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)\n\t\t\n\t\treturn content", "def get_folder_path(folder_name: str):\n TRADER_DIR, TEMP_DIR = _get_trader_dir(\".ctpbee\")\n folder_path = TEMP_DIR.joinpath(folder_name)\n if not folder_path.exists():\n folder_path.mkdir()\n return folder_path", "def createWorkingFolder(newpath):\n newpath = pdbName[:-4]+'_ARM_input'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n globals().update({ \"workingFolder\" : newpath+\"/\"})\n\n shutil.move( pdbName, newpath+\"/\"+pdbName) \n if glob.glob(\"*.seqmut\"):\n MutFile = glob.glob(\"*.seqmut\")[0]\n shutil.copyfile(str(MutFile), newpath+\"/\"+pdbName[:-4]+\".seqmut\")\n else:\n pass\n os.chdir(newpath)", "def createWinAssetDirs(assFolder, *args):\n\n name = cmds.textFieldGrp(awidgets[\"name\"], q=True, tx=True)\n types = cmds.radioButtonGrp(awidgets[\"type\"], q=True, sl=True)\n\n #here we compare that list of assets with our proposed name\n assets = cFuncs.getSpotAssetList(assFolder)\n if name in assets:\n cmds.confirmDialog(t=\"Name Exists!\", m = \"There is already an asset of this name\\nin this project! 
Please enter another.\")\n return()\n\n if types == 1:\n assType = \"characters\"\n elif types == 2:\n assType = \"props\"\n elif types == 3:\n assType = \"sets\"\n\n createAssDirs(assFolder, assType, name)", "def create_experiment_folder(path_out, dir_name, name='', stamp_unique=True):\n if not os.path.isdir(path_out):\n raise FileNotFoundError('missing base folder \"%s\"' % path_out)\n date = time.gmtime()\n if isinstance(name, str) and name:\n dir_name = '%s_%s' % (dir_name, name)\n # if you require time stamp\n if stamp_unique:\n path_stamp = time.strftime(FORMAT_DATE_TIME, date)\n # prepare experiment path with initial timestamp - now\n path_exp = os.path.join(path_out, '%s_%s' % (dir_name, path_stamp))\n if os.path.isdir(path_exp):\n logging.warning('particular out folder already exists')\n path_exp += '-' + str(uuid.uuid4().hex)\n else:\n path_exp = os.path.join(path_out, dir_name)\n path_created = create_folder(path_exp, ok_existing=False)\n logging.info('created experiment folder \"%r\"', path_created)\n return path_exp", "def old_create_dir_struct(self, create_first_rev_folder=\"True\"):\n # | - create_dir_struct\n for job in self.job_var_lst:\n if create_first_rev_folder == \"True\":\n path = self.var_lst_to_path(job) + \"_1\"\n elif create_first_rev_folder == \"False\":\n path = self.var_lst_to_path(job)\n\n path = self.root_dir + \"/\" + path\n\n if os.path.exists(path):\n mess = \"Path already exists: \" + str(path)\n print(mess)\n\n elif not os.path.exists(path):\n os.makedirs(path)\n\n # | - Creating Variable Text Files Through Directoy Structure\n for job in self.job_var_lst:\n path = self.var_lst_to_path(job)\n path = self.root_dir + \"/\" + path\n\n file_name = path + \"job_dir_level\"\n with open(file_name, \"w\") as fle:\n fle.write(\"\\n\")\n\n for root, dirs, files in os.walk(self.root_dir + \"/data/\"):\n if \"job_dir_level\" in files:\n continue\n\n else:\n prop_lst = []\n for folder in dirs:\n tmp = self.sep.join(folder.split(self.sep)[1:])\n\n prop = self.__replace_p_for_per__(tmp)\n prop = self.__replace_negative_for_n__(prop)\n prop_lst.append(prop)\n\n for key, value in self.level_entries.items():\n if set(prop_lst) == set(map(str, value)):\n\n file_name = root + \"/properties.txt\"\n with open(file_name, \"w\") as fle:\n fle.write(key + \"\\n\")\n\n # f = open(root + \"/properties.txt\", \"w\")\n # f.write(key + \"\\n\")\n # f.close()\n # __|\n\n # self.__create_dir_structure_file__()\n\n # | - folders_exist attribute should be True from now on\n file_name = self.root_dir + \"/jobs_bin/.folders_exist\"\n with open(file_name, \"w\") as fle:\n fle.write(\"\\n\")\n\n self.folders_exist = self.__folders_exist__(True)\n # __|\n\n # __|", "def createValDir(self):\n\t\tself.setDirNames()\n\t\tself.setScreenXmlFile()\n\n\t\tself.rollValDir \t = self.roll + \"/screenval\"\n\t\tcmd = 'mkdir -p %s' % (self.rollValDir)\n\t\tos.system(cmd)\n\n\t\tself.createValDirFiles()\n\t\treturn", "def set_adjustment_folder_name_to_create_new_one(self, folder_name_prefix):\n global adjustment_folder_name\n adjustment_folder_name = folder_name_prefix + self.random_string_generator(size=4)\n self.set_value_into_input_field(self.adjustment_folder_name_textbox_locator, adjustment_folder_name)", "def rand_dir():\n\n return random.choice([\"U\", \"D\", \"L\", \"R\"])", "def create_folder():\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")", "def 
setDestFolder(self, offset=0):\n while True:\n tempDest = input(\n offset * \" \" + \"Specify a sub-folder name to save the output files [%s]: \" % self.destFolder) or self.destFolder\n\n # If the folder does not exist, try to create it\n if not os.path.exists(self.currentPath + os.sep + tempDest):\n try:\n os.mkdir(tempDest)\n self.destFolder = tempDest\n self.destPath = self.currentPath + os.sep + self.destFolder\n break\n except OSError:\n print(\"Invalid folder name!\")\n\n # If it does exist set the destPath to it\n else:\n self.destFolder = tempDest\n self.destPath = self.currentPath + os.sep + self.destFolder\n break", "def generate_temp_folder(self):\n folder_name = str(time.time()).replace(\".\", \"\")\n folder_path = os.path.join(self.temp_folder, folder_name)\n os.makedirs(folder_path)\n self.logger.debug(f\"Created nested temp folder at {folder_path}\")\n return folder_path", "def create_project_folder(self):\n\t\tif not os.path.exists(self.segment_path):\n\t\t\tfileutil.makedirs(self.segment_path)", "def get_folder(self):\n name = \"%s_%s\" % (self.PREFIX, self.FOLDER_NAME)\n folders = self.mw.get_folders()\n for fldr in folders:\n if fldr[\"name\"] == name:\n self.folder_id = fldr[\"folder_id\"]\n return\n self.folder_id = self.mw.create_folder(name)", "def dirCapture(parameter, path):\r\n # N should be argument of instances NO.\r\n N = parameter\r\n # set path for the captured frames\r\n cpath = path + '%d' % N + '/'\r\n # create directory if not exist\r\n while (os.path.exists(cpath)):\r\n # print('instance N%d' % N + ' exists')\r\n N = N + 1\r\n cpath = path + '%d' % N + '/'\r\n\r\n dir = os.path.dirname(cpath)\r\n # print('create folder'+cpath)\r\n os.makedirs(cpath)\r\n return N, cpath", "def create_temp_folder():\n path_join = os.path.join(tempfile.gettempdir(), id_generator(5))\n os.makedirs(path_join)\n return path_join", "def generate_subdir(channel, energy=13):\n return os.path.join('%dTeV' % energy, channel, strftime(\"%d_%b_%y\"))", "def Directory(self) -> str:", "def _create_and_get_save_directory(self) -> [str, str]:\n directory = \"{}/{}/{}/{}\".format(self.save_path, EXPERIMENTS_DIRECTORY, self.name, self.run_id)\n sourcecode_directory = \"{}/sourcecode\".format(directory)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n raise Exception(\"Experiment run [{}] already exists for experiment [{}]\".format(self.run_id, self.name))\n return directory, sourcecode_directory", "def dir_assignment(assignment):\n return os.path.join(repository, assignment)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def create_subdirectory(dir_path, sub_name_or_keyword):\n if not os.path.exists(dir_path):\n print(\"ERROR (global function 'create_subdirectory').\"),\n print(\"No directory with path '%s'.\"% dir_path)\n return\n #\n if sub_name_or_keyword == 'run':\n date_and_time = [time.localtime()[i] for i in range(5)]\n date_and_time[0] = '{:04d}'.format(int(date_and_time[0]))\n for i in range(1,5):\n date_and_time[i] = '{:02d}'.format(int(date_and_time[i]))\n sub_name = '-'.join(date_and_time[0:4]) + 'h' + date_and_time[4] +'min'\n else:\n sub_name = sub_name_or_keyword\n #\n result = os.path.join(dir_path, sub_name)\n #\n if not os.path.exists(result):\n os.mkdir(result)\n return result", 
"def data_loader_new_directory_name(self, working_dir: Path):\n\n wd = SnowfakeryWorkingDirectory(working_dir)\n key = wd.index\n if key not in self.cached_counts:\n self.cached_counts[key] = wd.get_record_counts()\n\n if not self.run_until.sobject_name:\n return working_dir\n\n count = self.cached_counts[key][self.run_until.sobject_name]\n\n path, _ = str(working_dir).rsplit(\"_\", 1)\n new_working_dir = Path(path + \"_\" + str(count))\n return new_working_dir", "def test_create_folder(self):\n test = Server()\n inputs = [['create_folder','oook'],['create_folder','oook']]\n response = ['folder created','Folder already exists. Try with another folder name']\n res = []\n for val in inputs:\n res.append(test.create_folder(val))\n self.assertListEqual(res, response)", "def select_hares_folder(self):\n # Get path\n path = QtWidgets.QFileDialog().getExistingDirectory(self, 'HARES uitvoerbestanden folder')\n if not path:\n return None\n\n self.input_elements['hares folder'].set_value(path)", "def download_folder(self) -> Path:\n path = (\n config.storage_vol\n / f\"{self.device_type}/{self.patient_id}/{self.device_id}\"\n )\n path.mkdir(parents=True, exist_ok=True)\n return path", "def check_create_folder(self, output, *folder_names):\n path = self.video_file\n\n # if none then create diectory on same level as video directory with the folder_name and video name\n if output is None:\n output = os.path.abspath(os.path.join(os.path.dirname(path), os.pardir, *folder_names))\n else:\n output = os.path.join(output, self.video_name)\n\n # if directory not existing create directory\n if not os.path.exists(output):\n print('created new directory: ', output)\n os.makedirs(output)\n\n return output", "def create_folder(output_directory: str, fldrname: str):\n\n os.makedirs(output_directory, exist_ok=True)\n tstmp = datetime.now().strftime('%Y%m%d_%H%M%S')\n try:\n fldr_path = os.path.join(output_directory, fldrname)\n os.mkdir(fldr_path)\n except FileExistsError:\n fldr_path = os.path.join(output_directory, fldrname + '_{}'.format(tstmp))\n os.mkdir(fldr_path)\n return fldr_path", "def path_helper(location, date, time, slc_dir='slc', data_dir='/media/bup/Data'):\n\n base_folder = data_dir + '/' + location + '/' + date + '/'\n name = date + '_' + time\n def_path = base_folder + slc_dir + '/' + name\n return def_path", "def get_outfolder():\n \n valid = False\n while not valid:\n fname = raw_input(\"Please enter directory to save images. 
\")\n if not os.path.exists(fname):\n os.makedirs(fname)\n #Check to see if the file is there.\n if os.path.exists(fname): \n valid = True\n #File is not there, check to see if write privileges can be given\n #to created file.\n elif os.access(os.path.dirname(fname), os.W_OK):\n valid = True\n else:\n print \"Invalid local path, please try again.\"\n return fname", "def _newFileWizard(self):\n\n dialogs = Dialogs(self.view)\n\n #get category\n category = dialogs.radioButtonDialog('Pick a category:' , \n self.model.project.config['NEW_FILE_CATEGORIES'])\n\n #get name of asset/shot (loop until valid input)\n name = dialogs.fileTextPrompt('Enter name:')\n\n #get task\n tasks = self.model.project.config['NEW_FILE_TASKS'][category][:]\n tasks.append('Other')\n\n task = dialogs.radioButtonDialog('Pick a task:' , tasks)\n\n if task == 'Other':\n task = dialogs.textPrompt('Enter task:')\n\n #get the path of the file we are creating\n category_dir = self.model.project.config['NEW_FILE_PATHS'][category]\n result = os.path.join(category_dir, name, '%s_%s.mb' % (name, task))\n\n #ask to overwrite\n print 'EXISTS:', result, os.path.exists(result)\n if os.path.exists(result):\n msg = 'The file \"%s\" already exists. Overwrite?' % result\n dialogs.confirmPrompt(msg)\n\n #confirm all settings\n msg = 'You have selected:'\n msg += '\\n\\n\\tCategory: %s' % category\n msg += '\\n\\tName: %s' % name\n msg += '\\n\\tTask: %s' % task\n msg += '\\n\\nThis will create a file in this location: '\n msg += '\\n%s' % result\n msg += '\\n\\nDoes this look okay?'\n dialogs.confirmPrompt(msg)\n\n return result", "def getDirectory(nameP:str) -> str:\n nameObject = list(nameP.split(\" \"))\n lengName = len(nameObject)\n if (lengName >= 2):\n concat = \"\"\n nameProject = (concat.join(nameObject))\n else:\n nameProject = nameP\n\n lo = os.getcwd()\n # print(lo)\n if platform == 'linux' or platform == 'linux2':\n li = list(lo.split(\"/\"))\n var1 = li[0]\n var2 = li[1]\n var3 = li[2]\n pathC = (\"{0}/{1}/{2}/Documents/Respaldos/{3}\").format(var1, var2, var3, nameProject)\n return pathC\n elif platform == 'darwin':\n li = list(lo.split(\"/\"))\n var1 = li[0] \n var2 = li[1]\n var3 = li[2]\n pathC = (\"{0}/{1}/{2}/Documents/Respaldos/{3}\").format(var1, var2, var3, nameProject)\n return pathC\n elif platform == 'win32': \n li = list(lo.split(\"\\\\\"))\n var1 = li[0]\n var2 = li[1]\n var3 = li[2]\n pathC = (\"{0}/{1}/{2}/Documents/Respaldos/{3}\").format(var1, var2, var3, nameProject)\n return pathC", "def folder_str(f):\n if not os.path.exists(f):\n raise argparse.ArgumentTypeError('\"%s\" does not exist, you must create this folder' % f)\n return f", "def subdir(self):", "def get_or_create_folder(self, folder_names):\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(\n name=folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print(\"folder_created #%s folder : %s -- created : %s\" % (self.folder_created,\n current_parent, created))\n return current_parent", "def __init__(self, mode, variable):\n self.mode = mode\n if self.mode == 'file':\n self.file_list = variable.split(',')\n\n elif self.mode =='directory':\n self.file_list = os.listdir(variable)\n self.file_list = [os.path.join(variable, x ) for x in self.file_list]", "def make_path(self, filename):\n return os.path.join(self.root_path, filename)", "def create_save_folder(self):\n absolute_output = 
os.path.abspath(self.output).replace(\"\\\\\", \"/\")\n if self.paddle_length_factor is not None:\n self.save_folder = f\"{absolute_output}/{self.env_name}/PaddleLength_\" \\\n f\"{self.paddle_length_factor}/session{self.session}\"\n else:\n self.save_folder = f\"{absolute_output}/{self.env_name}/StandardEnv/session{self.session}\"\n tmp_folder = self.save_folder\n\n folder_tree = []\n while True:\n if not os.path.exists(self.save_folder):\n folder_tree.insert(0, self.save_folder)\n self.save_folder = self.save_folder[:self.save_folder.rindex(\"/\")]\n else:\n self.save_folder = tmp_folder\n break\n for folder in folder_tree:\n os.mkdir(folder)", "def make_directory(base):\n \t\n i = 0\n while 1:\n try:\n if i == 0:\n dirname = base\n else:\n dirname = base + '_' + str(i)\n os.mkdir(dirname)\n break\n except OSError:\n if not os.path.isdir(dirname):\n raise\n i += 1\n pass \n return dirname", "def data_path(path: str, createdir: bool = False) -> str:\n path_obj = Path(path)\n if not path_obj.is_absolute():\n if inside_project():\n path_obj = Path(project_data_dir(), path)\n else:\n path_obj = Path(\".scrapy\", path)\n if createdir and not path_obj.exists():\n path_obj.mkdir(parents=True)\n return str(path_obj)" ]
[ "0.6584977", "0.6489762", "0.6430416", "0.6309875", "0.63006985", "0.6285221", "0.6282425", "0.6207818", "0.61075854", "0.60703206", "0.6058079", "0.6047258", "0.6042317", "0.6019528", "0.6018604", "0.6012094", "0.6007274", "0.597246", "0.5971677", "0.59710586", "0.5961468", "0.59304285", "0.5913119", "0.5898179", "0.5898179", "0.589369", "0.5884262", "0.58807963", "0.587067", "0.5868905", "0.5860718", "0.585512", "0.58431643", "0.58394337", "0.5827043", "0.58230865", "0.58107424", "0.58063555", "0.5802323", "0.5797475", "0.5777721", "0.5762653", "0.57625324", "0.5761658", "0.57464486", "0.5742524", "0.57387376", "0.57295126", "0.5723771", "0.57212895", "0.57212275", "0.57201725", "0.57175213", "0.57135296", "0.5706761", "0.5702264", "0.5686973", "0.5669161", "0.56677324", "0.56555647", "0.5648236", "0.56432843", "0.5642941", "0.56371003", "0.56332517", "0.5628237", "0.5615142", "0.5611134", "0.56037956", "0.56008375", "0.55986863", "0.55957216", "0.55899477", "0.55846155", "0.55806863", "0.55689126", "0.5559075", "0.5537364", "0.5524695", "0.55208415", "0.55185413", "0.5516979", "0.5511773", "0.5509796", "0.55096596", "0.550345", "0.54952574", "0.5495097", "0.54865587", "0.54787964", "0.54786766", "0.54765", "0.5474967", "0.54696953", "0.5460805", "0.5458414", "0.545145", "0.5447005", "0.5443468", "0.5432737" ]
0.56268847
66
Sends all the input information to the loop
def start_config(self, entries, name_col, list_error, list_of_project_info):
    var = []
    for x in entries[3:]:
        var.append(x.get())
    list_error = loop_trough_row(var, name_col, list_error, list_of_project_info)
    return list_error
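A hedged sketch of the calling context for this record's document. loop_trough_row (identifier kept verbatim from the source) is external to the snippet, so the stub below only guesses at its behavior, and the assumption that entries[3:] skips three meta fields is illustrative, not confirmed by the source:

import tkinter as tk

def loop_trough_row(var, name_col, list_error, list_of_project_info):
    # Stand-in validator (assumed behavior): flag empty cells and record the row.
    for name, value in zip(name_col, var):
        if not value:
            list_error.append(f"missing value for column '{name}'")
    list_of_project_info.append(var)
    return list_error

class ConfigForm:
    def __init__(self, root, name_col):
        self.name_col = name_col
        # Assumed layout: three meta entries followed by one entry per column.
        self.entries = [tk.Entry(root) for _ in range(3 + len(name_col))]
        for e in self.entries:
            e.pack()

    def start_config(self, entries, name_col, list_error, list_of_project_info):
        var = []
        for x in entries[3:]:
            var.append(x.get())
        list_error = loop_trough_row(var, name_col, list_error, list_of_project_info)
        return list_error

root = tk.Tk()
form = ConfigForm(root, ["project", "owner", "deadline"])
errors = form.start_config(form.entries, form.name_col, [], [])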
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop(self):\n pass", "def loop(self):\n while True:\n self._print_field()\n try:\n cmd = input(PROMPT)\n self._invoke_cmd(cmd)\n except EOFError: # Allows to exit by pressing ⌃D without error\n break", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def processInputs(self):", "def run(self):\n while self.inputs:\n readable, writeable, exceptions = select(self.inputs,\n self.outputs,\n self.inputs)\n for s in readable:\n if s is self.server and self.accepting:\n self.accept(s)\n else:\n data = s.recv(1024)\n if data:\n self.parse(data.rstrip(), s)\n else:\n self.remove(s)\n\n # Writeable\n for s in writeable:\n self.send(s)\n\n # Exceptions\n for s in exceptions:\n self.remove(s)", "def Run(self):\n while not rospy.is_shutdown():\n # get input of destination\n if (self.target_x is None) or (self.target_y is None):\n self.target_x, self.target_y = self.GetTarget()\n # if the destination is not in x >= 0 and y <= 11, ask user to re-input\n # till the right values found\n if (self.target_x < 0 or self.target_x > 10) or (self.target_y < -10 or self.target_y > 10):\n print(\"-------------------------------------------------------\") \n print(\"WARNING: Invalid Input, please reinput the destination.\")\n print(\"-------------------------------------------------------\") \n self.target_x = None\n self.target_y = None\n else:\n print(\"Current Destination: [{}, {}]\".format(self.target_x, self.target_y))\n else:\n ################################################################################################\n # get all necessary parameters\n goal = np.array([self.target_x, self.target_y])\n robot_pos = self.GetRobotInfo()\n ranges, angles = self.GetObstaclePos()\n\n if (ranges is not None) and (angles is not None):\n ctrl = TangentBug(self.Lidar.range_max)\n # obsts = ctrl.Continuity(ranges, angles, robot_pos[:2])\n # print(len(obsts))\n linear, omega = ctrl.MotionToGo(ranges, angles, goal, robot_pos)\n print(\"=======================================\")\n # print([linear, omega])\n else:\n linear = 0.\n omega = 0.\n print(\"---------------------------------------\")\n print(\"NO OBSTACLE DETECTED.\")\n print(\"---------------------------------------\")\n\n ################################################################################################\n self.vel.linear.x = linear \n self.vel.angular.z = omega\n self.pub.publish(self.vel)\n\n # sleep till the next commend sent\n self.rate.sleep()", "def run(self):\n self.print_welcome()\n self.handle_inputs()", "def run(self, *arg, **kw):\n self.dirty = False\n for port in self.inputs:\n self.get_input_data(port)", "def run(self):\n while True:\n # Status message from state machine\n self.updateStatusMessage.emit(self.sm.status_message)\n # Serial errors from rexarm\n self.updateJointErrors.emit(self.rexarm.get_errors())\n # Only get rexarm feedback if initialized\n if self.rexarm.initialized:\n self.updateJointReadout.emit(self.rexarm.position_fb)\n self.updateEndEffectorReadout.emit(self.rexarm.get_wrist_pose())\n time.sleep(0.1)", "def __process_input(self):\n\n while not self.stop_event.isSet():\n\n readable, writable, exceptional = select.select([self.event_queue], [], [])\n\n if readable[0] is self.event_queue:\n\n event = self.event_queue.get()\n \n if (time.time() - event.creation) > INSTRUCTION_TIMEOUT:\n self.logging_queue.put(self.__create_event_obj(ERROR, 'TimeOut', str(time.time() - event.creation)))\n self.logger1.info(\"Instruction rejected due to timeout: '{}', '{}', 
'{}'\".format(event.source, event.type, event.value))\n \n elif not self.__input_filter(event):\n self.logging_queue.put(self.__create_event_obj(ERROR, 'Filtered', '{}, {}, {}'.format(event.source, event.type, event.value)))\n \n else:\n \n self.logging_queue.put(event) \n \n if event.type == self.input_commands.toggle_door_cmd:\n self.__toggle_door()\n self.__update_door_info()\n elif event.type == self.input_commands.light_cmd:\n self.__light()\n elif event.type == self.input_commands.open_door_cmd:\n self.__open_door()\n self.__update_door_info()\n elif event.type == self.input_commands.close_door_cmd:\n self.__close_door()\n self.__update_door_info()\n elif event.type == self.input_commands.control_wire:\n self.__log_output_pin_state(event)\n self.__update_door_info()\n elif event.type == self.input_commands.stop_cmd:\n self.__del__()\n return None\n \n \n #if event.hardware:\n # self.__log_input_pin_state(event) ", "def run(self):\n if has_GUI:\n self.GUI(self.buffer)\n else:\n while True:\n message = input(\"Write your command:\\n\")\n # print(message)\n self.buffer.append(message)", "def run(self):\n self.cmdloop()", "def Go(self):\n for ii in range(self.Nt):\n self.ReadData(ii)\n self.Writedata(ii)\n \n print '##################\\nDone!\\n##################'", "def main_loop(self):\n # run for only the allotted time (lifetime)\n for _ in range(self.lifetime * self.ticks_per_second):\n start_time = time()\n new_message, queue_len = self.communicator.get_message()\n if new_message is None: # no incoming messages\n self.do_random_task()\n else:\n # Convert string message back into tuple of ints\n new_message = list(map(int, new_message.split('@@@')))\n self.handle_incoming_message(new_message, queue_len)\n\n # this accounts for the time already taken in test_communication\n # and other activities from the total time allotted for the loop iteration\n already_taken = time() - start_time\n sleep_time = max(1/self.ticks_per_second - already_taken, 0)\n sleep(sleep_time)", "def loop(self):\n raise NotImplementedError()", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def run(self):\n while True:\n lines = self.process(date_generator=self.date_generator).split('\\n')\n for line in lines:\n if self.probability():\n if not self.simulation:\n self.engine.send(tag=self.tag, msg=str(line))\n now = datetime.utcnow().ctime()\n print('{0} => {1}'.format(now, str(line)))\n else:\n now = datetime.utcnow().ctime()\n print('{0} => Skipped by prob.'.format(now))\n\n if self.interactive:\n input(\"» Press Enter for next iteration «\")\n else:\n self.wait()", "def loop(self):\n\t\twhile (self.quit == 0):\n\t\t\ttry:\n\t\t\t\tuserInput = raw_input(\"> \")\n\t\t\t\tself.processCommand(userInput)\n\t\t\texcept EOFError:\n\t\t\t\tsys.exit()\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.exit()", "def wait_for_input(self):\n pass", "def _read_loop(self):\n while True:\n self.read()", "def run(self):\n tick_duration = 1 / self.config.tick_rate\n last_tick_time = time.time()\n\n while True:\n input_ = self.input_source.get_input()\n self.__update(input_)\n\n if self.state.exit:\n break\n\n current_time = time.time()\n sleep_time = tick_duration - (current_time - last_tick_time)\n if sleep_time > 0:\n time.sleep(sleep_time)\n last_tick_time = current_time", "def processIncoming(self):\r\n while 
self.queue.qsize():\r\n try:\r\n volume_T101 = self.queue.get(0)\r\n self.var_pb_progress.set(volume_T101/100) #scale to 100\r\n self.var_T101.set(\"T101: \" + str(round(volume_T101,4)))\r\n self.var_LIT101.set(self.take_reading(volume_T101))\r\n self.update_physical(volume_T101)\r\n self.PLC_command()\r\n self.check_attack(volume_T101)\r\n self.output_results()\r\n self.master.update_idletasks()\r\n except queue.Empty:\r\n pass", "def run(self):\n # Open three threads to execute receiving message, sending out message\n # and checking timeouts simutaneously.\n RV = threading.Thread(target=self.recv, args=())\n RV.daemon = True\n RV.start()\n TO = threading.Thread(target=self.check_time, args=())\n TO.daemon = True\n TO.start()\n SD = threading.Thread(target=self.send, args=())\n SD.daemon = True\n SD.start()\n while True:\n type_in = raw_input(self.name_str + '--> ')\n order = type_in.split(' ', 1)\n if order[0] == 'LINKDOWN' and len(order) == 2:\n self.link_down(order[1])\n elif order[0] == 'LINKUP' and len(order) == 2:\n self.link_up(order[1])\n elif order[0] == 'SHOWRT' and len(order) == 1:\n self.showrt()\n elif order[0] == 'CLOSE' and len(order) == 1:\n self.close()\n break\n else:\n print('invalid order, try again!')\n sys.exit()", "def run(self):\n\n if self._device_type == \"BW\":\n while self.working:\n if self.q.qsize() >= 1:\n message = self.q.get()\n print(self.q.qsize())\n\n # //-process the data use Util.SplitterD\n self.split.process_string(message, self.ori1, self.ori2, self.final1,\n self.final2, self.display1, self.display2)\n else:\n while self.working:\n if self.q.qsize() >= 3:\n message = self.q.get()\n message += self.q.get()\n message += self.q.get()\n print(self.q.qsize())\n\n # original1 = []\n # original2 = []\n # res1 = []\n # res2 = []\n # ticks1 = time.time()\n\n # //-process the data use Util.Splitter\n self.split.process_string(message, self.ori1, self.ori2, self.final1,\n self.final2, self.display1, self.display2)\n # ticks2 = time.time()\n # print((ticks2 - ticks1) * 1000)\n # original1, original2, res1, res2 = self.split.ori1, self.split.ori2, self.split.res1, self.split.res2\n # if len(original1) < 0 or len(original2) < 0 or len(res1) < 0 or len(res2) < 0:\n # continue\n # else:\n # # print(res2[0])\n # # message = {'ori1': original1,\n # # 'ori2': original2,\n # # 'res1': res1,\n # # 'res2': res2}\n # # self.sigOut.emit(message)\n # self.ori1.extend(original1)\n # self.ori2.extend(original2)\n # self.final1.extend(res1)\n # self.final2.extend(res2)\n # self.display1.put(res1)\n # self.display2.put(res2)\n # # length = len(res2)\n # # for index in range(length):\n # # self.display1.put(res1[index])\n # # self.display2.put(res2[index])", "def run(self):\n run1=0\n while (run1==0):\n Publisher().sendMessage(\"updatetext\", \"\")\n time.sleep(3)", "async def loop():\n # ArmDevice.storage.joints_pos = await get_positions() # Use this if encoders are wired up.\n # ArmDevice.storage.joints_pos = simulate_positions() # Use this for testing without position feedback.\n log.debug(\"command: {}\".format(ArmDevice.storage.command))\n ArmDevice.storage.controller.user_command(ArmDevice.storage.mode, *ArmDevice.storage.command)\n ArmDevice.storage.speeds = ArmDevice.storage.controller.update_duties(ArmDevice.storage.joints_pos)\n\n # publish speeds/duty cycles here\n log.debug(\"joints_pos: {}\".format(ArmDevice.storage.joints_pos))\n log.debug(\"speeds: {}\".format(ArmDevice.storage.speeds))\n await send_duties()", "def send_msg(self):\n while True:\n msg = input()\n 
# Added to show logs clean at the first time\n # a conncetion send a message.\n if(self.flag):\n self.k = self.k + 1\n self.flag = False\n self.srvsock.send(bytes(msg, encoding='utf-8'))", "def run(self):\n while True:\n display(self.world.draw())\n self.read_and_process_input()", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def run(self):\n self.startSerial()\n # Wait about five seconds before doing anything\n time.sleep(5)\n while True:\n # Check setpoints against all controllers\n self.check_setpoints()\n\n # Issue any new commands as necessary\n self.check_pins()\n\n # Receive the latest Arduino data and process into dictionary\n self.read_arduino_data_and_format_dictionary()\n\n # Clean all of the arduino stuff to avoid incorrect inputs\n with self.lock:\n self.ser.reset_output_buffer()\n with self.lock:\n self.ser.reset_input_buffer()", "def work(self):\n while True:\n message = self.get()\n self.handle(message)", "def run(self):\n \n \n # pyautogui.click(x=1310, y=210)\n # pyautogui.click(x=1310, y=210)\n # pyautogui.click(x=1310, y=210)\n # pyautogui.typewrite(self.t_amount, interval=0.05)\n while True : \n read_email_from_gmail(self.tp_val,self.sl_val,self.t_amount,self.get_iq_balance(),self.file_path) \n\n time.sleep(self.interval)", "def loop(self):\n while True:\n if self.gui_updates:\n self.update_gui()\n\n event, values = self.window.read(100)\n\n if event == \"btn_con_game\":\n Thread(target=self.connect_game, daemon=True).start()\n elif event == \"btn_con_headset\":\n Thread(target=self.connect_headset, daemon=True).start()\n elif event == \"btn_train_model\":\n Thread(target=self.train_model, daemon=True).start()\n elif event == \"btn_finalize\":\n Thread(target=self.finalize, daemon=True).start()\n \n to_update = self.loading.copy()\n for update in to_update:\n self.window.Element(f'{update}_loading').UpdateAnimation('assets/loading.gif')\n\n # End program if user closes window\n if event == sg.WIN_CLOSED:\n break\n\n self.window.close()", "def run(self):\n self.read_from_serial()", "def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r", "def loop(self):\n line = self.read()\n while line != \"quit\":\n value = self.eval(line)\n print(value)\n line = self.read()", "def run():\n #LOG.debug(\"and so it begins\")\n intro()\n\n reloop = True\n while reloop is True:\n lines = []\n print(\"Awaiting your input: \")\n print('EXIT or ctrl-c to quit WPM')\n test = ''\n while test != 'END' and test != 'EXIT':\n line = input()\n if line == 'EXIT':\n exit()\n elif line != \"END\":\n lines.append(line)\n else:\n test = 'END'\n #LOG.debug(lines)\n\n parse_lines(lines, p)\n\n #LOG.debug(p)", "def process():", "def writeInput(self):\n\n #self.collect.writeInput()", "def handle_input(self, text):\n for tsk in self.commands:\n for expr in tsk.starters:\n match_test = expr.pattern.match(text)\n if match_test:\n arg_dict = {}\n if len(match_test.groups()) == 0: # informationless command\n arg_dict = {expr.arg_names[0]: text}\n else:\n arg_dict = gen_dict(\n expr.arg_names, match_test.groups())\n self.overseer.start_process(\n tsk.name, tsk.thread_func, arg_dict)\n return\n if not self.overseer.is_running(tsk.name):\n continue\n for expr in tsk.command_patterns:\n match_test = 
expr.pattern.match(text)\n if match_test:\n print(\"Attempting to send command '\", text, \"' to\", tsk.name)\n if self.overseer.is_blocked(tsk.name):\n print(\"Error: blocked channel\")\n return\n arg_dict = {}\n if len(match_test.groups()) == 0: # informationless command\n arg_dict = {expr.arg_names[0]: text}\n else:\n arg_dict = gen_dict(\n expr.arg_names, match_test.groups())\n self.overseer.send_args(tsk.name, arg_dict)\n return", "def _run(self):\n while(self._loop):\n pass", "def input(self):\r\n pass", "def step(self):\n # Pull data from the first available input channel.\n\n input_bag = self.get()\n\n # todo add timer\n self.handle_results(input_bag, input_bag.apply(self._stack))", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def loop(self):\r\n while self.__running:\r\n self.__check_events()\r\n self.__render()\r\n self.__reset_variables()", "def run(self):\n while self.is_connected():\n self.__ticker.tick() # Tick (sleep)\n\n if self.process and self.process.is_alive():\n self.update()\n continue\n\n c = getkey() \n if c:\n if c == 'w':\n print \"Moving forward\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"mmove\")\n elif c == 'a':\n print \"Turning left\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"mleft\")\n elif c == 'd':\n print \"Turning right\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"mright\")\n elif c == 'f':\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"finish\")\n elif c == 'p':\n self.add_property(\"name\", \"remote_command\")\n self.add_property(\"pioneer_command\", \"record\")\n elif c == 'h':\n print \"[w] = forward [a] = left [d] = right [f] = finish\"\n\n \n ############################\n # Send data\n self.update()", "def start(self):\n\n\t\twhile True:\n\t\t\tinputReady, outputReady, exceptReady = select.select(\n\t\t\t\t[self.s],\n\t\t\t\t[],\n\t\t\t\t[],\n\t\t\t\t3\n\t\t\t)\n\n\t\t\t# Ready for receiving\n\t\t\tif len(inputReady) > 0 and inputReady[0] == self.s:\n\t\t\t\t# Read lines until input buffer is empty\n\t\t\t\tfor line in self.receiveLines():\n\t\t\t\t\tif len(line) > 0:\n\t\t\t\t\t\tprint(line)\n\n\t\t\t\t\tself.handle(line)\n\n\t\t\t# Only send if there is something to send\n\t\t\tif not self.outQueue.empty():\n\t\t\t\tm = self.outQueue.get_nowait()\n\n\t\t\t\tprint(\"Sending '{}'\".format(m.rstrip(\"\\r\\n\")))\n\t\t\t\tself.s.send(bytes(m, \"utf-8\"))\n\t\t\t\tself.outQueue.task_done()", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def run(self):\n # Define our components entry point\n while True:\n\n # for each packet waiting on our input port\n for packet in self.receive_all('in'):\n try:\n image = packet.get(\"data\")\n log.debug(\"%s received %s %s\",\n self.__class__.__name__, image.shape, image)\n mask = np.zeros_like(image, dtype=np.bool)\n for i, line in enumerate(image):\n minimum = np.min(image[i])\n factor = 1.1\n threshold = factor * minimum\n mask[i] = image[i] < threshold\n packet.set(\"data\", mask)\n except:\n log.error('Component Failed: %s',\n self.__class__.__name__, exc_info=True)\n\n # send the packet to the next component\n self.send('out', packet)\n\n # yield the CPU, allowing another component to run\n self.yield_ctrl()", "def run(self):\n\t\twhile 
1:\n\t\t\tif self._driveSystem.port_open == True and self._parent.aborted==False:\n\n\t\t\t\tself._driveSystem.check_encoder_pos()\n\t\t\t\tpos=self._driveSystem.positions\n\t\t\t\tif self._parent.printRequest==True: #Print positions when print Button was pressed\n\t\t\t\t\toutput=\"Axis 1: \"+str(pos[0])+\"\\nAxis 2: \"+str(pos[1])+\"\\nAxis 3: \"+str(pos[2])+\"\\nAxis 4: \"+str(pos[3])\n\t\t\t\t\tprint(output)\n\t\t\t\t\tself._parent.printRequest=False\n\t\t\t\tevent = PosUpdateEvent(myEVT_POSUPDATE, -1, pos)\n\t\t\t\twx.PostEvent(self._parent.matplotpanel, event)\n\n\t\t\t\tt=0\n\t\t\t\twhile t<UPDATE_TIME:\n\t\t\t\t\tself.checkQ()\n\t\t\t\t\ttime.sleep(REAC_TIME)\n\t\t\t\t\tt=t+REAC_TIME\n\t\t\telse:\n\t\t\t\ttime.sleep(1)", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def __main_loop(self):\n\n while not self.stop:\n self.__lock_data = True\n self.__bus_messages_copy = deepcopy(self.__bus_messages)\n self.__line_messages_copy = deepcopy(self.__line_messages)\n self.__global_messages_copy = deepcopy(self.__global_messages)\n self.__bus_messages = {}\n self.__line_messages = {}\n self.__global_messages = {\"kick reason\": \"\", \"free text\": \"\"}\n buses_to_kick_copy = deepcopy(self.__buses_to_kick)\n self.__buses_to_kick = list()\n self.__lock_data = False\n\n for bus in buses_to_kick_copy: # handles the buses that need to be kicked\n message = \"kicked for reason:\" + self.__global_messages_copy[\"kick reason\"]\n if bus.line_num in self.__line_messages_copy.keys():\n message += self.__line_messages_copy[bus.line_num][\"kick reason\"]\n if bus.line_num in self.__line_messages_copy.keys() and bus.id in self.__bus_messages_copy[\n bus.line_num].keys():\n message += self.__bus_messages_copy[bus.line_num][bus.id][\"kick reason\"]\n print(f\"sending message{message.strip()}\")\n bus.send_to_bus(message.strip())\n\n global_message = self.__build_global_update()\n for line, buses in self.__bus_controller.bus_dict.items():\n line_message = self.__build_line_update(line)\n for bus in buses:\n bus_message = self.__build_bus_update(bus)\n message = global_message + line_message + bus_message\n message = message.strip(\"\\n\")\n if message != \"\":\n bus.send_to_bus(message)\n\n sleep(MessagesSender.SLEEP_TIME)\n\n self.__shut_down()\n print(\"polling thread stopped\")", "def startDataAcq(self):\r\n\t\tglobal payload, control, output_settings, serials, datfiles\r\n\t\t# INITIALIZE THE OUTPUT FOLDER STRUCTURE\r\n\t\tcheck_dir(output_settings['folder'], output_settings['cruise'], payload)\r\n\t\tconfirm_dir(output_settings['folder'], output_settings['cruise'])\r\n\t\t# FIND THE START TIME\r\n\t\toutput_settings['start_time'] = init_time()\r\n\t\t# PRINT THE START TIME\r\n\t\tprint_spacer()\r\n\t\tprint 'Local Time: ', time.ctime(output_settings['start_time'])\r\n\t\tprint 'UTC: ', time.asctime(time.gmtime(output_settings['start_time']))\r\n\t\t\r\n\t\t# LOOP THROUGH THE SCIENTIFIC PAYLOAD\r\n\t\tfor k in payload.keys():\r\n\t\t\ttry:\r\n\t\t\t\tif serials[k].isOpen():\r\n\t\t\t\t\tclose_serial(serials[k])\r\n\t\t\texcept KeyError:\r\n\t\t\t\tprint ' '\r\n\t\t\t\t# print 'Serial port connected to '+k+' was not previously open.'\r\n\t\t\t# open the serial port\r\n\t\t\tserials[k] = init_serial(payload[k])\r\n\t\t\tif serials[k].isOpen():\t\t\t\t\r\n\t\t\t\t# print the serial info\r\n\t\t\t\tprint 'Receiving data from '+k\r\n\t\t\t\t# initialize the data file\r\n\t\t\t\tdatfiles[k] = init_datafile(output_settings, payload[k])\r\n\t\t\t\t# read one line because 
the first one after opening a port is usually gibberish\r\n\t\t\t\tline = serials[k].readline()\r\n\t\t\telse: \r\n\t\t\t\tprint 'Unable to connect to serial port '+payload[k]['port']+' connected to '+k\r\n\t\t\t# pause get everything setup\r\n\t\t\ttime.sleep(1)\r\n\t\t# start the loop \r\n\t\tcontrol.combine()", "def run(self):\n while(not self.stop_event.is_set()):\n # read values until stop is sent\n response1 = _read_once(1,self.serial)\n response2 = _read_once(2,self.serial)\n #print(response)\n self.data1[\"d\"].append(response1) # Push response to the data list for later\n self.data2[\"d\"].append(response2) # Push response to the data list for later\n curTime = time.time()\n self.data1[\"t\"].append(curTime)\n self.data2[\"t\"].append(curTime)\n #sleep(0.0001) # I need to be small enough to capture peaks.\n return", "def run(self):\n while True:\n buf = \"\"\n while len(buf) == 0 or buf[-1] != '\\n':\n if self.ser.available(): buf += self.ser.read()\n else: delay(1) # Avoid pegging CPU\n\n tokens = buf.split(' ')\n s = tokens[0]\n self.lock.acquire()\n try:\n if s == \"PPM\":\n self.ppm = [int(i) for i in tokens[1:]]\n elif s == \"Wind\":\n self.wind = int(tokens[1])\n elif s == \"roll\":\n self.roll = float(tokens[1])\n elif s == \"yaw\":\n self.yaw = float(tokens[1])\n elif s == \"Wpot\":\n self.winch = int(tokens[1])\n elif s == \"Rpot\":\n self.rudder = int(tokens[1])\n except: pass # A cast likely failed\n self.lock.release()", "def run_main(self):\n self.addSensors()\n \n while True:\n # Leemos los sensores\n self.readSensors()\n \n # Extraemos la información a partir de los datos\n self.processData()\n \n # Actualizamos la máquina de estados a partir de la información recibida por los sensores \n self.updateFiniteStateMachine()\n \n # Calculamos las acciones que tenemos que aplicar a los distintos motores, en función del\n # estado y las lecturas de los sensores\n self.controller()\n \n # Pasamos a motores las acciones calculadas\n self.execute()\n\n # Publicamos info importante para el debug\n self.refreshUserInterface()\n \n print(self.name + \": --------------------------\")\n time.sleep(2) #!!!!!!!!!!!!!!!! 
ELIMINAR DELAY !!!!!!!!!!!!!!!!# ", "def run(self):\n try:\n while True:\n in_buff = self.stream.read_in_buf()\n for message in in_buff:\n packet = PacketFactory.parse_buffer(message)\n self.handle_packet(packet)\n self.stream.clear_in_buff()\n self.handle_user_interface_buffer()\n self.stream.send_out_buf_messages(self.reunion_mode == ReunionMode.FAILED)\n time.sleep(2)\n except KeyboardInterrupt:\n log('KeyboardInterrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)", "def run(self):\n while True:\n msg = self.recv()", "def run(self):\n\t\tself._run_neural_network()\n\t\tself._run_webots()\n\n\t\t# We first wait for webots inputs\n\t\twebotsTurn = True\n\t\tneuralNetworkTurn = False\n\t\twhile True:\n\t\t\tif webotsTurn:\n\t\t\t\tprint \"reading data from webots:\"\n\t\t\t\twbtData = self._wbt_read_data()\n\t\t\t\tprint \"sending data to nn...\"\n\t\t\t\tself._send_data_to_nn(wbtData)\n\t\t\t\twebotsTurn = False\n\t\t\t\tneuralNetworkTurn = True\n\n\t\t\telif neuralNetworkTurn:\n\n\t\t\t\tprint \"reading data from nn:\"\n\t\t\t\tnnData = self._nn_read_data()\n\t\t\t\tif self._neuralNetwork.poll() != None: break\n\t\t\t\tprint \"sending data to webots...\"\n\t\t\t\tself._send_data_to_wbt(nnData)\n\t\t\t\tneuralNetworkTurn = False\n\t\t\t\twebotsTurn = True", "def iterate(self, input_dict):\n\n self.gameDisplay.fill(self.display_states[self.display_names[self.current_display_state]]['background'])\n\n self.input_letter = input_dict['chord']\n\n if self.input_letter == '000000':\n self.input_letter = None\n \n self.input_control = input_dict['standard']\n\n if self.input_control == 'display':\n self.change_display_state()\n\n self.display_status_box()\n\n if self.game_state == 'introduction':\n self.introduction()\n elif self.game_state == 'play_game':\n self.play_game()\n\n self.draw_buttons(self.current_prompt)\n\n self.pygame.display.update()", "def process(self):\n if len(self.inputs):\n self._process_input()\n while len(self.servers) > 0:\n self._process_input()\n self._write_file()", "def send():\r\n \r\n while True:\r\n server_input = client.recv(HEADER).decode(FORMAT)\r\n server_input = str(server_input)\r\n print(server_input)\r\n if server_input == 'CLEAR':\r\n clear = client.recv(HEADER).decode(FORMAT)\r\n clear = str(clear)\r\n # print(clear)\r\n if server_input == 'SELECT':\r\n selection = selectPlayer()\r\n client.send(selection.encode(FORMAT))", "def run(self, input):\n print self.print_meep(input)", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def listen(self):\n while self.active:\n self.handle_input()", "def server_do(self,input, connstream):\r\n pass", "def input(self):", "def feedbackLoop(self):\n while not rospy.is_shutdown():\n try:\n # Robot Status Feedback\n if self.socket_available:\n self.socket_available = False # Block other operations from using the socket while in use\n robot_status = self.robot.GetStatusRobot()\n gripper_status = self.robot.GetStatusGripper()\n self.socket_available = True # Release the socket so other processes can happen\n status = UInt8MultiArray()\n status.data = [\n robot_status[\"Activated\"],\n robot_status[\"Homing\"],\n robot_status[\"Simulation\"],\n robot_status[\"Error\"],\n robot_status[\"Paused\"],\n robot_status[\"EOB\"],\n robot_status[\"EOM\"],\n gripper_status[\"Gripper enabled\"],\n gripper_status[\"Homing state\"],\n gripper_status[\"Limit reached\"],\n gripper_status[\"Error state\"],\n gripper_status[\"force overload\"]\n ]\n self.status_publisher.publish(status)\n\n # 
Position Feedback\n self.feedback.get_data()\n joints_fb = JointState()\n joints_fb.position = feedback.joints\n pose_fb = Pose()\n pose_fb.position.x = feedback.cartesian[0]\n pose_fb.position.y = feedback.cartesian[1]\n if len(feedback.cartesian) == 4:\n pose_fb.orientation.x = feedback.cartesian[2]\n pose_fb.orientation.y = feedback.cartesian[3]\n else:\n pose_fb.position.z = feedback.cartesian[2]\n pose_fb.orientation.x = feedback.cartesian[3]\n pose_fb.orientation.y = feedback.cartesian[4]\n pose_fb.orientation.z = feedback.cartesian[5]\n self.joint_publisher.publish(joints_fb)\n self.pose_publisher.publish(pose_fb)\n except Exception as error:\n rospy.logerr(str(error))", "def algorithm_loop(self):", "def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)", "def main_loop(self):\n # main loop...don't ever exit\n while True:\n # collect data\n # get the time...the local clock is set with NTP regularly\n self._get_time()\n \n # get the latest metar data from the closest location\n self._get_metar()\n \n # get the latest fence station data\n self._get_fence_station()\n \n # get the lastest roof station data\n #METAR self._get_roof_station()\n \n # publish the data to our data file\n self.write_data_files()\n \n # show the user we are running\n print(\"{:s}\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d %H:%M:%S.%f\")), end=\"\\r\", flush=True)\n \n # wait a bit for the next loop\n time.sleep(3.0)\n \n return", "async def loop(self):\n\t\twhile self.active:\n\t\t\ttry:\n\t\t\t\tawait self.process_data(await self.websocket.recv())\n\t\t\texcept exceptions.ClientError as e:\n\t\t\t\tawait self.send(0, str(e))\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tawait self.unregister()", "def run(self): # pragma: no cover\n while True:\n self.update()", "def run(self): \n \n while self.event.is_set():\n \n if self.isEnabled:\n \n try:\n \n # Get and clear all the messages from the listener \n buffered_messages = self.a_listener.get_messages() \n self.a_listener.clear_messages()\n \n #grabs the unique ID's \n unique_ids = list({m.arbitration_id for m in buffered_messages}) \n \n #iterates through the ID's and gets the first instance of each unique one\n for i in unique_ids:\n loop_msg = next( obj for obj in buffered_messages if obj.arbitration_id == i)\n date_str = loop_msg.timestamp\n if date_str not in self.unique_messages:\n self.unique_messages[date_str] = []\n \n # store the unique messages in a dictionary with timestamp index\n self.unique_messages[date_str].append(loop_msg) \n \n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n self.ReEstablishConnection()\n \n time.sleep(self.timer)", "def _writeloop(self):\r\n while self._ll_alive:\r\n ## Add a thread lock\r\n if not self._uart_tx_queue.empty():\r\n data = self._uart_tx_queue.get()\r\n #clear the response list before send the command\r\n #self._uart_rx_queue.clear()\r\n #self.log.debug(\"Uart send cmd:\",data)\r\n #time.sleep(0.01)\r", "def input_reader_worker():\n while True:\n global last_read_player1_input\n global last_read_player2_input\n\n # First read movement inputs from adc\n # First write byte to read from Vin3 - player1 input channel\n try:\n bus.write_byte(I2CADDR, 0x80)\n last_read_player1_input[\"movement\"] = read_from_adc()\n except IOError:\n logging.warning(\"hardware_input: IOError when writing to bus. 
Setting last_read_player1_input to a default value instead.\")\n last_read_player1_input[\"movement\"] = config.adc_max_val / 2\n\n # Now write to read from Vin4 - player2 input channel\n try:\n bus.write_byte(I2CADDR, 0x40)\n last_read_player2_input[\"movement\"] = read_from_adc()\n except IOError:\n logging.warning(\"hardware_input: IOError when writing to bus. Setting last_read_player2_input to a default value instead.\")\n last_read_player2_input[\"movement\"] = config.adc_max_val / 2\n\n # Then read switch inputs from GPIO ports\n try:\n last_read_player1_input[\"stretch\"] = GPIO.input(config.gpio_pin_p1_stretch)\n last_read_player1_input[\"serve\"] = GPIO.input(config.gpio_pin_p1_serve)\n except IOError:\n logging.warning(\"hardware_input: Unable to read player1 switch input\")\n\n try:\n last_read_player2_input[\"stretch\"] = GPIO.input(config.gpio_pin_p2_stretch)\n last_read_player2_input[\"serve\"] = GPIO.input(config.gpio_pin_p2_serve)\n except IOError:\n logging.warning(\"hardware_input: Unable to read player2 switch input\")\n\n time.sleep(1 / float(config.adc_updates_per_sec))", "def readInput():\n for e in pygame.event.get():\n try:\n id = e.joy\n print id\n dev = InputDevice.devs[id]\n if e.type == JOYBUTTONDOWN:\n if e.button == dev._accBtn:\n dev._speed = 1\n elif e.button == dev._revBtn:\n dev._speed = -1\n elif e.button == dev._powBtn:\n dev._boost = 2\n elif e.type == JOYBUTTONUP:\n if e.button == dev._accBtn:\n dev._speed = 0\n elif e.button == dev._revBtn:\n dev._speed = 0\n elif e.button == dev._powBtn:\n dev._boost = 1\n elif e.type == JOYAXISMOTION:\n if e.axis == dev._steeringAxis:\n dev._dir = dev._js.get_axis(dev._steeringAxis)\n except Exception:\n None", "def handle_function(self):\n while True:\n # pack the data into a dictionary\n data = {\n 'steer': global_steer\n }\n\n # use struct to make sure we have a consistent endianness on the length\n length = pack('>Q', len(pickle.dumps(data)))\n\n # sendall to make sure it blocks if there's back-pressure on the socket\n self.socket.sendall(length)\n self.socket.sendall(pickle.dumps(data))\n\n # receive the success token\n ack = self.socket.recv(1)", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def recv(self, *messages):\n for message in messages:\n self.input.put(message)", "def synchronise(self):\n ainps=[]\n last=None\n for i in self.inputs:\n 
i.inputedge.entry['bg']=COLOR_normal \n i.inputedge.setEntry(i.edge) \n if(i.inputnumber.var.get() == 1):\n ainps.append(i)\n if i.inpnum == self.allinputs.getLast(): last=i\n if len(ainps) < 1:\n print 'No inputs chosen for synchro.' \n return\n if last == None:\n print 'No last input chosen for synchronisation.'\n return\n print 'last=',last.name\n flag=0\n for i in ainps:\n #print i.phase\n if i.phase == '?':\n flag=1 \n print 'The phase of input ',i.name,i.inpnum,'is not measured.'\n if flag: return \n last.phi='P'\n D = (int(last.phase)+5) % 25\n #here set bcdelay\n cmd=\"setbcdelay(\"+str(D)+\")\"\n output=self.vb.io.execute(cmd,log=\"yes\",applout=\"<>\")\n for i in ainps:\n if i==last: continue\n phi= (int(i.phase)-D) % 25\n if (phi<6) or (phi>18): i.phi='N'\n else: i.phi='P'\n print i.name,i.phi,i.edge\n if i.phi != i.edge: \n i.inputedge.entry['bg']='green'\n i.inputedge.setEntry(i.phi)", "def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass", "def run(self):\n yield self.env.timeout(self.initial_delay)\n while self.env.now < self.finish:\n # wait for next transmission\n yield self.env.timeout(self.adist)\n self.packets_sent += 1\n p = Packet(self.env.now, self.sdist, self.packets_sent, src=self.id, flow_id=self.flow_id)\n self.out.put(p)", "def Listen(self):\n while True:\n time.sleep(1)", "def Loop(self):\n self.coshell.SetModesCallback(self.SetModes)\n while True:\n try:\n text = self.Prompt()\n if text is None:\n break\n self.Run(text) # paradoxically ignored - coshell maintains $?\n except EOFError:\n # ctrl-d\n if not self.coshell.ignore_eof:\n break\n except KeyboardInterrupt:\n # ignore ctrl-c\n pass\n except interactive_coshell.CoshellExitError:\n break", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def process(self):", "def process(self):", "def process(self):", "def read_inputs(self):\n self.in_power.read()\n self.in_alert.read()", "def run(self):\n inputs = [self.server]\n\n while self.running:\n print '1'\n try:\n readable, writeable, exceptional = \\\n select.select(inputs, [], [])\n except select.error, e:\n print 'select:error[%s]' % e.message\n break\n\n for sock in readable:\n print '2'\n if sock == self.server:\n client, address = self.server.accept()\n client.setblocking(0)\n inputs.append(client)\n # self.outputs.append(client)\n\n print 'Client[%s] connected!' 
% address[0]\n self.clients[client] = address[0]\n\n else:\n print '3'\n self.recv_data(sock)", "def main():\n\tports = glob.glob(\"/dev/tty.wchusbserial*\") + glob.glob(\"/dev/tty.usbserial*\") + glob.glob(\"COM3\") + glob.glob(\"COM4\")\n\tBAUDRATE = 9600\n\tchoice = int(input((str(ports) + \" enter numerical index for port: \")))\n\tportname = ports[choice]\n\tport = None\n\tsending_queue = None\n\treceiving_process_on = None\n\treceiving_process = None\n\ttry:\n\t\tsending_queue = multiprocessing.Queue()\n\t\treceiving_process_on = multiprocessing.Value(c_bool,False)\n\t\treceiving_process = multiprocessing.Process(target = communication, args = (portname,BAUDRATE,sending_queue,receiving_process_on))\n\t\treceiving_process.start()\n\t\twhile True:\n\t\t\tword = input(\"Enter a message: \")\n\t\t\tsending_queue.put(create_chunk(word)) #sending 32 bytes to the process queue\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"ERROR:\", e)\n\tfinally:\n\t\treceiving_process_on.value = False\n\t\tfor i in range(10): #wait for the process to stop\n\t\t\tpass\n\t\tif receiving_process != None:\n\t\t\treceiving_process.join()\n\t\t\n\t\tif sending_queue != None:\n\t\t\tsending_queue.close()", "def wait() -> None:\n\n process_input(input())", "def recive_data(self):\n # read all available data\n while self.ser.inWaiting() > self.INPUT_DATA_SIZE+1:\n data = array.array('c')\n # search the header\n data.append(self.ser.read(1))\n while data[0] != chr(1):\n data[0] = self.ser.read(1)\n \n # wait for all available data\n while self.ser.inWaiting() < (self.INPUT_DATA_SIZE-1):\n time.sleep(0.03);\n \n # recives data\n data = self.ser.read(self.INPUT_DATA_SIZE-1)\n \n # prove if you want graphical data\n if self.pushButton_monitor.isChecked():\n # decodes the data\n t = struct.unpack('I', data[3]+data[2]+data[1]+data[0])\n r = struct.unpack('f', data[4]+data[5]+data[6]+data[7])\n x0 = struct.unpack('f', data[8]+data[9]+data[10]+data[11])\n x1 = struct.unpack('f', data[12]+data[13]+data[14]+data[15])\n u = struct.unpack('f', data[16]+data[17]+data[18]+data[19])\n \n self.time = t[0]*25e-9\n \n # prepare the string output\n aux_str = \" t = \"+str(self.time)+\"\\t\"\n aux_str += \" r = \"+str(r[0])+\"\\t\"\n aux_str += \" u = \"+str(u[0])+\"\\t\"\n aux_str += \" x1 = \"+str(x1[0])+\"\\t\"\n aux_str += \" x0 = \"+str(x0[0])+\"\\n\"\n # print string output\n self.textBrowser.insertPlainText(aux_str)\n \n # append data to the arrays\n self.graf_t.append(self.time)\n self.graf_r.append(r[0])\n self.graf_x0.append(x0[0])\n self.graf_x1.append(x1[0])\n self.graf_u.append(u[0])\n \n # remove one value if the arrays have maximum length\n if self.graf_t.buffer_info()[1] >= NUM_SAMPLES:\n self.graf_t.pop(0)\n self.graf_r.pop(0)\n self.graf_x0.pop(0)\n self.graf_x1.pop(0)\n self.graf_u.pop(0)\n \n # reload number of samples lavel\n self.label_samples_value.setText(str(self.graf_t.buffer_info()[1]))\n # reload number of waiting chars in serial rx buffer\n self.label_rx_buff_value.setText(str(self.ser.inWaiting()))\n\n # reload mutex area\n self.updated_data = 1\n \n # prove if there are available id's\n if (self.actionPC_Monitor.isChecked() and data[20] == chr(2)):\n # if it is true, looks how much id's\n i = struct.unpack('B', data[21])\n\n if i[0] < STACK_SIZE:\n for z in range(i[0]):\n new_device = struct.unpack('B', data[z+22])\n new_string = str(new_device[0])\n \n llista = self.listWidget_link.findItems(new_string, QtCore.Qt.MatchExactly)\n if len(llista) == 0:\n self.listWidget_link.addItem(new_string)", "def 
run_aqi(self):\r\n while True:\r\n self.get_aqi()\r\n time.sleep(30 - time.time() % 30)", "def run(self):\n running = True\n while running:\n ################################### ARDUINO POLLING ###################################\n try:\n print(\"DEBUG: running\")\n if self.debug == 'n':\n # Arduino is connected\n inp = self.arduino.read()\n \n else: \n inp = str(input(\"Enter Packet, Enter Key to skip. Ex: (DATA&BUZZ)\\n\")) \n\n if inp:\n self.socket.setblocking(True)\n arduino_input_header, arduino_input = inp.split('&')\n \n if arduino_input_header == \"DATA\": \n if arduino_input == \"BUZZ\":\n # buzz the owner\n self.buzz_subroutine()\n continue \n \n else:\n # a PIN was sent by the Arduino\n self.pin_check_subroutine(arduino_input)\n continue\n \n \n ########################## LISTEN FOR COMMAND FROM SERVER #########################\n else:\n self.socket.settimeout(1) \n \n # check if a command was received\n try:\n cmd = self.socket.recv(4096)\n\n except:\n print(\"DEBUG: receive timed out\")\n continue\n\n else: \n cmd_hdr, cmd_msg, cmd_sdr = self.parse_packet(cmd)\n \n if cmd_hdr == \"CMD\":\n if cmd_msg == \"LOCK DOOR\": \n # a remote lock was issued\n \n if self.debug == 'n':\n # tell Arduino to lock the door\n self.arduino.write(\"LD\")\n\n print(\"DEBUG: locking override finished\")\n\n elif cmd_msg == \"UNLOCK DOOR\":\n # a remote unlock was issued\n \n if self.debug == 'n':\n # tell Arduino to unlock the door\n self.arduino.write(\"UD\")\n\n print(\"DEBUG: unlocking override finished\") \n \n except (KeyboardInterrupt, SystemExit):\n self.socket.sendall(self.make_packet(\"CMD\", \"SHUTTING DOWN\"))\n raise \n \n except Exception as e:\n self.socket.sendall(self.create_log(sys.exc_info()))", "def sender_iter(self):\n while 1:\n yield self.send_next()", "def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)", "def process_input(self):\n print(\"========================Start of Process_Input() Method*\")\n request_data = [\"name\", 0, 0, 0] # initialing th object variables\n req_data_counter = 0 # refers to an index in a list\n\n with open(self.__file_name) as input_file:\n whole_file = input_file.read().splitlines()\n for i in range(len(whole_file)):\n whole_file[i] = whole_file[i].split(',') # use comma as a delimiter\n for j in range(len(whole_file[i])):\n whole_file[i][j] = whole_file[i][j].strip()\n if req_data_counter < 4: # we will break the data into units\n request_data[req_data_counter] = whole_file[i][j]\n req_data_counter = req_data_counter + 1\n if req_data_counter > 3:\n # create object, having read all values for a single req\n new_request_object = Request.Request(request_data[0], request_data[1], request_data[2],\n request_data[3])\n self.input_list.append(new_request_object)\n assert isinstance(new_request_object, object) # asserting if item added is object request\n req_data_counter = 0 # resetting index counter to start reading new request data\n print(\"========================file reading finished*\")\n self.display_contents(self.input_list)\n print(\"========================End of Process_Input() Method *\")", "def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as 
E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")" ]
[ "0.68043214", "0.6738158", "0.6389131", "0.6361578", "0.63389635", "0.62979394", "0.626825", "0.6264768", "0.62522584", "0.6227473", "0.6211855", "0.61628276", "0.6144344", "0.6113936", "0.6113531", "0.6113409", "0.6088087", "0.6075006", "0.60653067", "0.6044094", "0.6031112", "0.60304505", "0.6022016", "0.60171664", "0.60160446", "0.60065925", "0.60015476", "0.60005105", "0.5998557", "0.59948075", "0.59939593", "0.5993321", "0.5977636", "0.5974079", "0.596958", "0.5965937", "0.5963222", "0.5941972", "0.5936566", "0.5932655", "0.59248245", "0.5920317", "0.59187067", "0.5916276", "0.5909959", "0.590697", "0.58997864", "0.5898183", "0.5897698", "0.5875562", "0.5861797", "0.5856034", "0.5850896", "0.584865", "0.5847085", "0.5840645", "0.5839154", "0.5836908", "0.58281255", "0.5826236", "0.5821147", "0.5818831", "0.5814807", "0.5813808", "0.5813808", "0.58043206", "0.5802816", "0.57939005", "0.57917434", "0.57849765", "0.5780773", "0.57807434", "0.5775757", "0.5768125", "0.5763722", "0.57452893", "0.57268107", "0.5726465", "0.5725389", "0.57196593", "0.5715231", "0.57145727", "0.57087666", "0.56951994", "0.56692594", "0.5666427", "0.5664603", "0.56638247", "0.56638247", "0.56638247", "0.56624424", "0.56597584", "0.5658885", "0.56553775", "0.5652962", "0.5640068", "0.56336004", "0.563076", "0.5628859", "0.5622568", "0.56096935" ]
0.0
-1
Control of inputs and try/except for mainloop
def check_entry(self, controller, entries, list_of_project_info, error_label):
    # Validate the user input: no field may be empty.
    for x in range(0, len(entries)):
        if entries[x].get() == "":
            messagebox.showerror("Error", "Expected no empty fields")
            return
    # The column must be given as a letter, e.g. 'B', not a number.
    if not entries[2].get().isalpha():
        messagebox.showerror("Error", "Expected column in letter not number, e.g. 'B' ")
        return
    name_col = self.col_to_num(entries[2].get())
    self.write_to_indata(entries)
    list_error, error_present = [], []
    list_error = controller.start_config(entries, name_col, list_error, list_of_project_info)
    if len(list_error) == 0:
        message = "Successfully generated all state files"
        error_present.append(message)
        error_label.config(text="Successfully generated all state files")
    else:
        for element in list_error:
            if element.error_type == "1":    # error in loop_trough_row
                message = "expected error in excel spreadsheet at row " + str(element.file_name) + "\n"
            elif element.error_type == "2":  # filename missing
                message = "expected error in file " + str(element.file_name) + "\n"
            elif element.error_type == "3":  # filename error
                message = "expected error in file name at row " + str(element.file_name) + "\n"
            elif element.error_type == "4":  # error in 1:st or 3:rd line in the excel sheet
                message = "expected error in excel spreadsheet on 1:st or 3:rd row " + "\n"
            error_present.append(message)
        # Collect the errors into a report and point the user to it.
        error_report = open("error_report.txt", "w+")
        error_report.write(''.join(error_present))
        error_report.close()
        error_label.config(text="Error occurred, check error report in " + entries[1].get())
        # error_label.config(text=(''.join(error_present)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cont():\n\n try:\n input = raw_input()\n except Exception:\n pass", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)", "def wait_for_input(self):\n pass", "def main_loop(self):\r\n print('Press ctrl-c to quit')\r\n while True:\r\n url = input('\\nType Question url: ')\r\n handler = AnswerHandler(self.session)\r\n res, err = handler.answer_questions(url)\r\n if res:\r\n print('No more questions for this URL')\r\n else:\r\n print(f'Unexpected exception occurred: {err}', file=sys.stderr)\r\n traceback.print_exc()", "def main():\n try:\n run_it()\n except KeyboardInterrupt:\n print('\\nSee you the next time.')\n except Exception:\n logging.error(traceback.format_exc())", "def input_thread(L):\n raw_input()\n L.append(None)", "def wait() -> None:\n\n process_input(input())", "def main():\n user_input = user_input_state()\n check_user_input(user_input)", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "async def run(self):\n while True:\n try:\n self._input_getch()\n await asyncio.sleep(0.05)\n except KeyboardInterrupt:\n logger.debug(f\"KeyboardInterrupt detected within the view\")\n raise", "def run():\n #LOG.debug(\"and so it begins\")\n intro()\n\n reloop = True\n while reloop is True:\n lines = []\n print(\"Awaiting your input: \")\n print('EXIT or ctrl-c to quit WPM')\n test = ''\n while test != 'END' and test != 'EXIT':\n line = input()\n if line == 'EXIT':\n exit()\n elif line != \"END\":\n lines.append(line)\n else:\n test = 'END'\n #LOG.debug(lines)\n\n parse_lines(lines, p)\n\n #LOG.debug(p)", "def _WaitForAnyUserInput() -> None:\n _get_input('Press any key to continue')", "def __process_input(self):\n\n while not self.stop_event.isSet():\n\n readable, writable, exceptional = select.select([self.event_queue], [], [])\n\n if readable[0] is self.event_queue:\n\n event = self.event_queue.get()\n \n if (time.time() - event.creation) > INSTRUCTION_TIMEOUT:\n self.logging_queue.put(self.__create_event_obj(ERROR, 'TimeOut', str(time.time() - event.creation)))\n self.logger1.info(\"Instruction rejected due to timeout: '{}', '{}', '{}'\".format(event.source, event.type, event.value))\n \n elif not self.__input_filter(event):\n self.logging_queue.put(self.__create_event_obj(ERROR, 'Filtered', '{}, {}, {}'.format(event.source, event.type, event.value)))\n \n else:\n \n self.logging_queue.put(event) \n \n if event.type == self.input_commands.toggle_door_cmd:\n self.__toggle_door()\n self.__update_door_info()\n elif event.type == self.input_commands.light_cmd:\n self.__light()\n elif event.type == self.input_commands.open_door_cmd:\n self.__open_door()\n self.__update_door_info()\n elif event.type == self.input_commands.close_door_cmd:\n self.__close_door()\n self.__update_door_info()\n elif event.type == self.input_commands.control_wire:\n self.__log_output_pin_state(event)\n self.__update_door_info()\n elif event.type == self.input_commands.stop_cmd:\n self.__del__()\n return None\n \n \n #if event.hardware:\n # self.__log_input_pin_state(event) ", "def loop(self):\n while True:\n self._print_field()\n try:\n cmd = input(PROMPT)\n self._invoke_cmd(cmd)\n except EOFError: # Allows to exit by 
pressing ⌃D without error\n break", "def loop(self):\n\t\twhile (self.quit == 0):\n\t\t\ttry:\n\t\t\t\tuserInput = raw_input(\"> \")\n\t\t\t\tself.processCommand(userInput)\n\t\t\texcept EOFError:\n\t\t\t\tsys.exit()\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.exit()", "def run(self):\n self.print_welcome()\n self.handle_inputs()", "def _handle_loop(self):\n pass", "def main():\n #set done variable to false\n done = False\n\n while not done: #while loop that keeps running the program until user wants to exit\n\n begin() #function call that opens the tkinter window application\n\n full_exit = False #set full_exit variable to false\n\n while not full_exit:\n try: #error handling\n finish = raw_input(\"Login as another user? (yes/no):\")\n\n #ends both while loops smoothly and completely exits the application\n if finish.lower() == \"no\":\n full_exit = True\n done = True\n\n #stops the program from asking the user input question and runs the application again\n elif finish.lower() == \"yes\":\n full_exit = True\n\n #raises value error and asks the user to input answer until the right answers are entered\n elif finish.lower() != \"no\" or finish.lower() != \"yes\":\n raise ValueError(\"Invalid Input\")\n\n\n except ValueError,errorvar:\n print errorvar,\": Please enter 'yes' or 'no'\"", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def _run(self):\n while(self._loop):\n pass", "def interactiveLoop():\n global STATUS\n num_lines = int(sys.stdin.readline())\n for _ in range(num_lines):\n tuple = map(string.strip, string.split(sys.stdin.readline(), \" \"))\n if len(tuple) == 0: # wrong input (empty line) or EOF\n STATUS = 2\n break\n if tuple[0] == SMTP_SERVER:\n if len(tuple) != 3:\n STATUS = 2\n break\n Check_SMTP(tuple[1], tuple[2])\n elif tuple[0] == NTP_SERVER:\n if len(tuple) != 2:\n STATUS = 2\n break\n Check_NTP(tuple[1])\n elif tuple[0] == DNS_SERVER:\n if len(tuple) != 2:\n STATUS = 2\n break\n Check_DNS(tuple[1])\n elif tuple[0] == EXTERNAL_WEB_NAME: # deprecated\n # lookup name onna bunch of DNSServers (at least one)\n if len(tuple) < 3:\n STATUS = 2\n break\n Check_ExternalWebName(tuple[1], tuple[2:])\n elif tuple[0] == GATEWAY:\n if len(tuple) != 2:\n STATUS = 2\n break\n Check_Gateway(tuple[1])\n elif tuple[0] == SYSLOG_SERVER:\n if len(tuple) != 2:\n STATUS = 2\n break\n Check_SYSLOG(tuple[1])\n elif tuple[0] == TEST_URL:\n if len(tuple) < 2:\n STATUS = 2\n break\n check_url(tuple[1], tuple[2:])\n else:\n STATUS = 2\n break\n print get_info(1)", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n self.again = False", "def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r", "def go4ever():\n try:\n while(True):\n print('running')\n sleep(10)\n except:\n a=input('stop? 
(yes/no)')\n if a!='yes':\n go4ever()", "def _main_():\n while True:\n num = input(\"Please enter a number or done: \")\n if num == \"done\":\n print(bold(lightgreen(\"Thank You!\")))\n break\n else:\n try:\n num = int(num)\n if num < 0:\n num = abs(num)\n if num < 100:\n print(f\"Your number is negative {tens_text(num)}\")\n elif num < 1000:\n print(f\"Your number is negative {hundreds_text(num)}\")\n elif num == 0:\n print(\"Your number is zero\")\n elif num < 100:\n print(f\"Your number is {tens_text(num)}\")\n elif num < 1000:\n print(f\"Your number is {hundreds_text(num)}\")\n except Exception:\n print(info(bold(\"Not a valid input, try again\")))", "def loop(self):\n pass", "def main_loop(self):\n import time\n while not self.ask_for_stop:\n self.run_pending()\n time.sleep(self.delay)\n # FIXME this will look at self.ask_for_stop only every self.delay seconds\n # see https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python", "def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()", "def main_loop(self):\n try:\n self.state_machine.set_state('wait')\n\n while True:\n events = list(reversed(pygame.event.get())) # Take all events, most recent first\n\n if self.find_quit_event(events):\n break\n\n if self.find_fullscreen_event(events):\n self.window.toggle_fullscreen()\n\n event = self.find_resize_event(events)\n if event:\n self.window.resize(event.size)\n\n self.state_machine.process(events)\n\n finally:\n self.led_picture.quit()\n self.led_print.quit()\n GPIO.cleanup()\n self.camera.quit()\n self.printer.quit()\n pygame.quit()", "def waitenter(times=1):\n\n # For each time\n for _ in range(times):\n # Ask for user input\n input(\"\")", "def run_no_input(self, initial=None, context=None):\n for _ in self.run(initial, context):\n raise RuntimeError(\"Machine requires input to continue!\")", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def loop():\n\n load_config_project()\n\n L.debug(\"running with version: %s\", sys.version)\n is_version_2 = sys.version.startswith(\"2\")\n while True:\n response = ''\n if num_version == 2:\n response = raw_input(\"Enter command:\")\n if num_version == 3:\n response = input(\"Enter command:\")\n\n if response != '':\n commander.parse(response)\n sleep(0.5)", "def wait_for_input(self):\n if self._dont_enter_interactive_mode:\n return\n stop = False\n while True:\n print(\">>> \", end='')\n try:\n command_str = input()\n except EOFError:\n print(\"Exiting interactive mode\")\n break\n stop = self.interpret_command(command_str)\n if stop:\n print(\"Exiting interactive mode\")\n break", "def runloop(self, prompt=''):\n if not self.connected(): return\n\n try:\n self.start_anti_idle_timer()\n\n use_rawinput = True\n try:\n import readline\n except ImportError:\n use_rawinput = False\n\n while True:\n if use_rawinput:\n try:\n ln = raw_input(prompt)\n except EOFError:\n break\n else:\n if prompt:\n sys.stdout.write(prompt)\n sys.stdout.flush()\n ln = sys.stdin.readline()\n if not len(ln):\n break\n\n ln = ln.rstrip('\\r\\n')\n if not ln:\n continue\n rc, res = self.run(ln)\n if not rc or not self.connected():\n break\n for l in res:\n sys.stdout.write(l + '\\n')\n finally:\n self.cancel_anti_idle_timer()", "def run():\n reset_calc()\n finish = False\n 
printCurrent()\n while not finish:\n printMenu()\n\n m = input().strip()\n if (m == 'x'):\n finish = True\n elif (m == '+'):\n m = input(\"Give nominator:\")\n n = input(\"Give denominator:\")\n try:\n calc_add (int(m), int(n))\n printCurrent()\n except ValueError:\n print (\"Enter integers for m, n, with not null n\")\n elif (m=='c'):\n reset_calc()\n printCurrent()\n elif (m=='u'):\n undo()\n printCurrent()\n else:\n print (\"Invalid command\")\n\n print (\"By!!!\")", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def processInputs(self):", "def input_reader_worker():\n while True:\n global last_read_player1_input\n global last_read_player2_input\n\n # First read movement inputs from adc\n # First write byte to read from Vin3 - player1 input channel\n try:\n bus.write_byte(I2CADDR, 0x80)\n last_read_player1_input[\"movement\"] = read_from_adc()\n except IOError:\n logging.warning(\"hardware_input: IOError when writing to bus. Setting last_read_player1_input to a default value instead.\")\n last_read_player1_input[\"movement\"] = config.adc_max_val / 2\n\n # Now write to read from Vin4 - player2 input channel\n try:\n bus.write_byte(I2CADDR, 0x40)\n last_read_player2_input[\"movement\"] = read_from_adc()\n except IOError:\n logging.warning(\"hardware_input: IOError when writing to bus. Setting last_read_player2_input to a default value instead.\")\n last_read_player2_input[\"movement\"] = config.adc_max_val / 2\n\n # Then read switch inputs from GPIO ports\n try:\n last_read_player1_input[\"stretch\"] = GPIO.input(config.gpio_pin_p1_stretch)\n last_read_player1_input[\"serve\"] = GPIO.input(config.gpio_pin_p1_serve)\n except IOError:\n logging.warning(\"hardware_input: Unable to read player1 switch input\")\n\n try:\n last_read_player2_input[\"stretch\"] = GPIO.input(config.gpio_pin_p2_stretch)\n last_read_player2_input[\"serve\"] = GPIO.input(config.gpio_pin_p2_serve)\n except IOError:\n logging.warning(\"hardware_input: Unable to read player2 switch input\")\n\n time.sleep(1 / float(config.adc_updates_per_sec))", "def main():\n while True:\n click.clear()\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n while True:\n select_data = choice('\\nPlease select the information you would'\n 'like to obtain:\\n'\n '\\n'\n '[ts] Time Stats\\n'\n '[ss] Station Stats\\n'\n '[tds] Trip Duration Stats \\n'\n '[us] User Stats\\n'\n '[rd] Raw Data\\n'\n '\\n'\n '[0] Exit\\n>',\n ('ts', 'ss', 'tds', 'us', 'rd', 'r'))\n click.clear()\n if select_data == 'ts':\n time_stats(df)\n elif select_data == 'ss':\n station_stats(df)\n elif select_data == 'tds':\n trip_duration_stats(df)\n elif select_data == 'us':\n user_stats(df)\n elif select_data == 'rd':\n display_data(df)\n elif select_data == '0':\n break\n\n restart = choice('\\nWould you like to restart?'\n 'Enter yes or no.\\n').lower()\n print()\n if restart.lower() != 'y':\n break", "async def _main(self):\n while True:\n time.sleep(1)", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def run(self):\n tick_duration = 1 / self.config.tick_rate\n last_tick_time = time.time()\n\n while True:\n input_ = self.input_source.get_input()\n self.__update(input_)\n\n if self.state.exit:\n break\n\n current_time = time.time()\n sleep_time = tick_duration - (current_time - last_tick_time)\n if sleep_time > 0:\n time.sleep(sleep_time)\n last_tick_time = current_time", "def 
Loop(self):\n self.coshell.SetModesCallback(self.SetModes)\n while True:\n try:\n text = self.Prompt()\n if text is None:\n break\n self.Run(text) # paradoxically ignored - coshell maintains $?\n except EOFError:\n # ctrl-d\n if not self.coshell.ignore_eof:\n break\n except KeyboardInterrupt:\n # ignore ctrl-c\n pass\n except interactive_coshell.CoshellExitError:\n break", "def main():\n user_interaction()", "def main():\n user_input_name()\n user_input_age()\n choose_unit()\n user_input_weight()\n user_input_height()\n bmi_calculator()\n bmi_categories()\n restart_calculator()", "def get_input(self):\n result = None\n\n try:\n while True:\n result = self.console.read_for_condition(prompt=\">>> \", condition=self.is_valid_input)\n\n if result is not None:\n break\n except KeyboardInterrupt:\n quit()\n\n # run command for next condition\n self.game_branch[result]()", "def run(self):\n while True:\n try:\n self.check_value()\n except Exception as e:\n print(f\"[ERROR]: Error running the continuous run loop on sample_module: {e}\")\n continue", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def main() -> None:\n manager = Manager()\n queue_log_record = manager.Queue(-1)\n path_file_input = LocalSocket.receive()\n LocalSocket.send(\"Next\")\n path_file_output = LocalSocket.receive()\n basicConfig(stream=sys.stdout, level=DEBUG)\n logger = getLogger(__name__)\n try:\n KeyboardInterrupter(\n example_use_case_interrupt(path_file_input, path_file_output, queue_log_record, worker_configurer),\n get_process_id(),\n ).test_keyboard_interrupt()\n except KeyboardInterrupt:\n logger.debug(\"__main__ KeyboardInterrupt\")\n check_log(queue_log_record)\n LocalSocket.send(\"Test succeed\")\n logger.debug(\"__main__ sleep\")\n finally:\n time.sleep(10)", "def loop(self):\n raise NotImplementedError()", "def main_loop(self):\n center_point = self.mot.center_point\n\n screen_width = center_point[0] * 2\n screen_height = center_point[1] * 2\n\n time.sleep(1)\n pretty_progress_bar(\n 3,\n )\n\n # while int(time.time()) - start <= 10:\n while not self.mot.abort:\n object1_position = self.mot.position(self.color1)[0]\n object2_velocity = self.mot.speed(self.color2)\n # print(object2_velocity)\n\n self.compare(object1_position[0] < 0.25 * screen_width, 'left')\n self.compare(object1_position[0] > 0.75 * screen_width, 'right')\n self.compare(object1_position[1] < 0.25 * screen_height, 'jump')\n self.burst_compare(object2_velocity > 150, 'fire')\n\n # print('KEYBOARD ABORT')", "def run(self):\n try:\n while True:\n in_buff = self.stream.read_in_buf()\n for message in in_buff:\n packet = PacketFactory.parse_buffer(message)\n self.handle_packet(packet)\n self.stream.clear_in_buff()\n self.handle_user_interface_buffer()\n self.stream.send_out_buf_messages(self.reunion_mode == ReunionMode.FAILED)\n time.sleep(2)\n except KeyboardInterrupt:\n log('KeyboardInterrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def main(self):\n self.startup()\n if self.vehicle:\n try:\n while not self._loop_should_exit:\n self.tick()\n time.sleep(1)\n except KeyboardInterrupt:\n 
self.cleanup()\n self.cleanup()", "def main():\r\n words = hangman_helper.load_words(file='words.txt')\r\n run_single_game(words)\r\n type_of_input=hangman_helper.get_input()\r\n while type_of_input[1]:\r\n run_single_game(words)\r\n type_of_input = hangman_helper.get_input()", "def blocking_input_loop(figure, event_names, timeout, handler):\n if figure.canvas.manager:\n figure.show() # Ensure that the figure is shown if we are managing it.\n # Connect the events to the on_event function call.\n cids = [figure.canvas.mpl_connect(name, handler) for name in event_names]\n try:\n figure.canvas.start_event_loop(timeout) # Start event loop.\n finally: # Run even on exception like ctrl-c.\n # Disconnect the callbacks.\n for cid in cids:\n figure.canvas.mpl_disconnect(cid)", "def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")", "def run(self):\n while True:\n display(self.world.draw())\n self.read_and_process_input()", "def test_run_loop_success(self):\n found = False\n pyint = Interpreter(limit=15)\n try:\n pyint.run(code=BF_CODE_LOOP_TWICE)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def main():\n \n try:\n roadmap = read_cities(input('Please enter the file location: '))\n print('')\n print_cities(roadmap)\n best = find_best_cycle(roadmap)\n print_map(best)\n run_viz = input('>> Run visualisation function? (Y/N): ')\n print('')\n while run_viz != 'N':\n if run_viz == 'Y':\n print('Two windows opened. Please close to end.')\n visualise(best)\n break\n else:\n print('Please type only Y or N')\n run_viz = input('>> Run visualisation function? (Y/N): ')\n except Exception as e:\n print(str(e))", "def main():\r\n global user_pick, pickno, total\r\n test_total()\r\n sleep(delay)\r\n print(\"It is your turn!\")\r\n pickno = int(4)\r\n #Repeats the process as many times as we need\r\n while total >= 4:\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain\r\n test_pick()\r\n remain()\r\n cmp_logic()\r\n sleep(delay)\r\n print(\"You should pick \" + str(total))\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain()\r\n test_pick()\r\n remain()\r\n # Only way that USER WINS!!\r\n if int(total) == 0:\r\n sleep(delay)\r\n print(\"User WINS!\")\r\n exit()", "def run(self):\n self.cmdloop()", "def main(): \n while True:\n city, month, day = get_filters()\n\n df = generate_stats(city, month, day)\n\n # step through data\n step_through = input('\\nWould you like to step through raw data? Type \"yes\" to step through: \\n')\n if step_through.lower() == 'yes':\n step_through_data(df)\n\n # restart\n restart = input('\\nWould you like to restart? 
Type \"yes\" to restart.\\n')\n if restart.lower() != 'yes':\n break", "def ask_numbers(question, error):\n while True:\n value = 0\n try:\n value = int(input(question))\n except ValueError:\n print(error)\n except UnboundLocalError:\n print(error)\n except Exception:\n print(error)\n if value <= 0:\n print(\"Syötä positiivinen luku, joka on suurempi kuin 0\\n->\")\n else:\n break\n return value", "def event_loop(self):\n if self.message_counter:\n if not self.msg:\n self.showdialog()\n else:\n self.msg.setText(\n \"COMET encounterd {} error(s)\".format(self.message_counter).ljust(\n 70\n )\n )", "def validated_input(input_msg: str, error_msg: str, validator, screenshot:str =None):\n while(True):\n reset_screen()\n\n if screenshot is not None:\n print(screenshot)\n\n data = input(input_msg)\n\n try:\n return validator(data)\n except:\n reset_screen()\n popup(error_msg.format(data), screenshot)\n input(\"\")", "def loop(self):\r\n while self.__running:\r\n self.__check_events()\r\n self.__render()\r\n self.__reset_variables()", "def get_user_input():\n while True:\n try:\n user_action = int(input(\"Enter (1) Search Books (2) Move Books (3) Exit\\n\"))\n for index, value in enumerate(USER_INPUT_SERVICE(), 1):\n if user_action == index:\n return value\n if user_action > 3:\n raise IndexError(\"Please enter 1, 2, or 3.\")\n except ValueError:\n print(\"Please enter the number.\")", "def get_user_input(wanted_type):\n while True:\n user_input = input(\"Please enter your value: \")\n if user_input == \"q\" or user_input == \"exit\":\n raise SystemExit # Nasty way to kill the program :p\n try:\n if wanted_type == bool:\n if user_input.strip() in {\"true\",\"t\",\"True\",\"T\",\"yes\"}: return True\n if user_input.strip() in {\"false\",\"f\",\"False\",\"F\",\"no\"}: return False\n raise ValueError # If not true or false\n if wanted_type == str:\n answer = user_input.strip().upper()\n if answer in {\"A\",\"B\",\"C\",\"D\",\"E\"}: return answer\n if user_input.strip() in {\"true\",\"t\",\"True\",\"T\",\"yes\"}: return \"T\"\n if user_input.strip() in {\"false\",\"f\",\"False\",\"F\",\"no\"}: return \"F\"\n raise ValueError # If not one of the possible errors\n\n return wanted_type(user_input) # Convert input to wanted_type\n except ValueError:\n if wanted_type == bool:\n print(\"Please give either true or false as your answer.\")\n elif wanted_type == str:\n print(\"Please give either A B C D or T/F as your answer.\")\n elif wanted_type == float:\n print(\"Please provide your answer as a number to 2 decimal places.\")\n elif wanted_type == int:\n print(\"Please provide your answer as a whole number.\")\n else:\n print(\"Error function called inccorectly. 
Called with {}\".format(wanted_type))\n raise ValueError", "def work3():\n logging.info(\"work3 doing a job\")\n if random.randint(1, 5) == 1:\n logging.error(\"Error in work3: bad input\")", "def main():\n \"\"\"get and format the data\"\"\"\n observation = weather.get_observation()\n forecast = weather.get_forecast()\n draw_list = build_draw_list(observation, forecast)\n\n try:\n while 1:\n for drawable in draw_list:\n print(drawable[0])\n print(drawable[1], \"\\n\")\n time.sleep(6.5)\n weather.update()\n observation = weather.get_observation()\n forecast = weather.get_forecast()\n draw_list = build_draw_list(observation, forecast)\n\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupt detected, exiting...\")", "def handle_input():\n\n # wait for user input and get timeout or character to process\n char = read_input()\n\n # handle user input\n if not is_input_valid(char):\n # No valid input, keep waiting for input\n return True\n\n # if terminal size is not valid, stop here\n if not nuqql.config.WinConfig.is_terminal_valid():\n show_terminal_warning()\n return True\n\n # if terminal resized, resize and redraw active windows\n if char == curses.KEY_RESIZE:\n nuqql.conversation.resize_main_window()\n return True\n\n # pass user input to active conversation\n for conv in nuqql.conversation.CONVERSATIONS:\n if conv.is_active():\n conv.process_input(char)\n return True\n\n # if no conversation is active pass input to active list window\n if nuqql.win.MAIN_WINS[\"list\"].state.active:\n # list window navigation\n nuqql.win.MAIN_WINS[\"input\"].redraw()\n nuqql.win.MAIN_WINS[\"log\"].redraw()\n nuqql.win.MAIN_WINS[\"list\"].process_input(char)\n return True\n\n # list window is also inactive -> user quit\n return False", "def loop(self):\n while self.dispatch(True) is not QUIT:\n pass", "def setup():\r\n value = input(\"Enter a positive decimal integer (\\\"quit\\\" to stop): \")\r\n\r\n while value.lower() != \"quit\":\r\n binary_calc(int(value)) # Calls converter function on inputted value\r\n print(\"\\n\")\r\n hex_calc(int(value)) # Calls converter function on inputted value\r\n value = input(\r\n \"\\nEnter a positive decimal integer (\\\"quit\\\" to stop): \")", "def user_input():\n #Error messages\n num_invalid = \"Invalid input, please insert a valid number\"\n str_invalid = \"Invalid input, please try again following the input conventions requested\"\n\n #Model Type\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n while model_type not in {'ib', 'sc'}:\n print(str_invalid)\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n\n #Compound\n compound = input(\"What compound or drug are you using?\")\n \n #Dose Type\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n while dose_type not in {\"c\",\"i\",\"r\"}:\n print(str_invalid)\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n\n if dose_type == 'c':\n while True:\n try:\n dose = float(input(\"What is the dose of \" + compound + \" that you want to test? 
(units in ng per hour): \"))\n break\n except:\n print(num_invalid)\n dose_mass = None\n time_dose = None\n num_dose = None\n \n elif dose_type == 'i':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n dose = None\n time_dose = None\n num_dose = None\n\n elif dose_type == 'r':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n time_dose = float(input(\"What time period are the doses given over? (units in hours): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n num_dose = float(input(\"How many doses are given? - this program assumes that doses are evenly spaced throughout the time period: \"))\n break\n except:\n print(num_invalid)\n dose = None\n \n #Length of simulation time\n while True:\n try:\n len_assay = float(input(\"What time period would you like to simluate the model? (units in hours): \"))\n break\n except:\n\t print(num_invalid)\n \n #Interval times\n while True:\n try:\n len_interval = float(input(\"What interval time would you like in the simulation? (units in hours): \"))\n break\n except:\n print(num_invalid)\n\n #clearance\n while True:\n try:\n clearance = float(input(\"What is the clearance rate? (units in ng/hour): \"))\n break\n except:\n print(num_invalid)\n\n \n #compartments\n compartments = []\n\n if model_type == \"ib\":\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n\n while True:\n try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n\n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n\n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n\n compart_list = None\n \n elif model_type == \"sc\":\n while True:\n try:\n sub_compart = input(\"Enter volume (L), transition rate (ng/hour) for the sub compartment (all seperated by spaces - eg: 5 25 ): \")\n sub_compart_split = sub_compart.split()\n sub_compart_split = [float(i) for i in sub_compart_split]\n break\n except:\n print(str_invalid)\n\n sub_compart_split.append(str(\"Sub\"))\n compartments.append(sub_compart_split)\n\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n while True:\n 
try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n \n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n \n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n compart_list = None\n\n #visualisation\n vis = input(\"Would you like to generate a graph? (Y/N): \")\n while vis not in {'Y','y','N','n'}:\n print(str_invalid)\n vis = input(\"Would you like to generate a graph? (Y/N): \") \n\n #unix timestamp\n curr_datetime = time.time()\n curr_datetime = str(curr_datetime)\n\n\n print(\"Thank you! Building model, please wait...\")\n\n\n return {\n 'model_type': model_type,\n 'compound': compound,\n 'dose_type': dose_type,\n 'dose':dose,\n 'dose_mass': dose_mass,\n 'time_dose': time_dose,\n 'num_dose': num_dose,\n 'len_assay':len_assay,\n 'len_interval':len_interval,\n 'clearance':clearance,\n 'compartments':compartments,\n 'vis':vis,\n 'curr_datetime':curr_datetime\n }", "def run(self):\n self.cycle = 0\n self.error_code = None\n\tsubNumber = 0\n\ttimestarted = time.time() \n\tself.failCount = 0\n while True:\n\t self.cycle += 1\n\t self.seen_per_cycle = 0\n\t self.step = 0\n\n #if not self.running:\n # self.restart()\n # return\n try:\n\t\tif (config.MAX_CYCLES_TILL_QUIT+1 <= self.cycle-self.failCount):\n\t \t if self.error_code == None:\n\t\t\tself.error_code = 'COMPLETE'\n\t\t else:\n\t\t\tself.error_code = self.error_code + \"-C\"\n\t\t return\n\n\t\tcurrentTime = time.time()\n\t\tif (config.SLEEP == 1 and currentTime - timestarted > config.MAX_TIME_AWAKE):\n\t\t\tsubNumber = subNumber + 1\n\t\t\ttimestarted = currentTime\n\t\t\tif (subNumber > utils.getSubMultiplier()):\n\t\t\t\tsubNumber = 0\n\t\telse:\n \tif (self.cycle > 1):\n \t\ttime.sleep(random.randint(30, 60))\n\t\t\telse:\n\t\t\t time.sleep(1)\n\n\t\tif self.failCount >= 3:\n\t \t if self.error_code == None:\n\t\t\tself.error_code = 'STOPPED'\n\t\t else:\n\t\t\tself.error_code = self.error_code + \"-D\"\n\t\t return\n\n self.error_code = None\n\n \tsuccess = self.login(subNumber, self.numActiveAtOnce)\n\n \tif not success:\n\t\t self.failCount = self.failCount + 1\n\t\t time.sleep(3)\n\t\t continue\n\n\t\tlogger.info(\"Logged into: \" + self.username)\t\t\n\n\t\tself.main()\n\n except BannedAccount:\n \tlogger.info(self.username + \" appears to be banned\")\n\t self.error_code = 'BANNED'\n# self.restart(30, 90)\n #return\n\t\tself.failCount = self.failCount + 1\n\t\tcontinue\n\t # this only occurs if it is non fixable, fixable ones are handled where it was running\n except CaptchaAccount:\n\t logger.info(\"Stopping worker as there appear to be no more accounts\")\n\t\t\tself.error_code = self.error_code + \"-X\"\n\t\t\treturn\n except FunkyAccount:\n\t logger.info(\"Stopping worker as this account is being funky\")\n\t\t\tif self.error_code is None:\n\t\t\t\tself.error_code = \"FUNKY\"\n\t\t\telse:\n\t\t\t\tself.error_code = self.error_code + \"-F\"\n\t\t\treturn\n except Exception:\n logger.exception('A wild exception appeared!')\n self.error_code = 'EXCEPTION'\n #self.restart()\n 
#return\n\t\tself.failCount = self.failCount + 1\n\t\tcontinue\n #if not self.running:\n # self.restart()\n # return\n\t self.failCount = 0\n #if self.cycle <= config.CYCLES_PER_WORKER:\n # logger.info('Going to sleep for a bit')\n # self.error_code = 'SLEEP'\n #self.running = False\n # logger.info('AWAKEN MY MASTERS')\n #self.running = True\n #self.error_code = None\n #self.error_code = 'RESTART'\n #self.restart()", "def _get_inputs(self):\n getting_letter = True\n while getting_letter:\n try:\n guess = self._console.read(\"Guess a letter [a-z]:\")\n if guess.lower() >= \"a\" and guess.lower() <= \"z\":\n self._puzzle.get_guess(guess)\n getting_letter = False\n elif len(guess) > 1:\n print(\"Only a single letter is accepted here.\")\n else:\n print(\"Error: You suck!\")\n \n except ValueError:\n print(\"Only strings are allowed for this input.\")\n print(\"\")", "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise", "def loop(self):\n while True:\n if self.gui_updates:\n self.update_gui()\n\n event, values = self.window.read(100)\n\n if event == \"btn_con_game\":\n Thread(target=self.connect_game, daemon=True).start()\n elif event == \"btn_con_headset\":\n Thread(target=self.connect_headset, daemon=True).start()\n elif event == \"btn_train_model\":\n Thread(target=self.train_model, daemon=True).start()\n elif event == \"btn_finalize\":\n Thread(target=self.finalize, daemon=True).start()\n \n to_update = self.loading.copy()\n for update in to_update:\n self.window.Element(f'{update}_loading').UpdateAnimation('assets/loading.gif')\n\n # End program if user closes window\n if event == sg.WIN_CLOSED:\n break\n\n self.window.close()", "def main():\n is_program_working = True\n while is_program_working:\n display.print_program_menu(MAIN_MENU)\n try:\n choose_option()\n except ValueError as err:\n display.print_command_result(str(err))", "def _start_interactively():\n while True:\n array = input(\"Please type in the array\"\n \" that should be checked for being a latin square: \")\n row_length = int(input(\"Please type in the length \"\n \"of each row in your array: \"))\n print(is_latin_square(row_length, array))\n print(\"\")", "def __exit_condition(data_logger):\n try:\n while True:\n raw_input(\"\") # FIXME: is raw_input the right approach\n if CLOSE:\n raise KeyboardInterrupt()\n\n except (KeyboardInterrupt, EOFError):\n sys.stdin.close()\n data_logger.stop()", "def main():\n exit = False\n thinker = Thinker()\n while not exit:\n print(thinker)\n command = input(\">\")\n if command == \"exit\":\n exit = True\n else:\n handler = InputHandler(command)\n handler.parse()(thinker)\n\n print(\"Bye\")", "def main_loop(self):\n print(\"Welcome to the Natural Language shell.\\n\"\n + \"Enter your English commands in the terminal.\\n\"\n + \"Enter 'dictionary' to print list of supported commands.\\n\"\n + \"Enter 'exit' to exit the program.\")\n inpt = \"\"\n #Input loop\n while 
inpt != \"exit\":\n print(\"\")\n inpt = input(\"Enter Command: \")\n try:\n if inpt == 'dictionary':\n print(self.dictionary)\n elif inpt == 'exit':\n break\n else:\n self.command(grammify(inpt))\n except ValueError:\n print(\"Command couldn't be completed. Check your spelling and format.\")\n except UnboundLocalError:\n print(\"Command couldn't be completed. Check your spelling and format.\")", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def int_input():\n while True:\n try:\n n = int(input(\"Enter amount of cubes(n): \"))\n if n < 1 or n > 100:\n print(\"Input must be a positive integer [1, 100]!\")\n continue\n except ValueError:\n print(\"Not an integer!\")\n continue\n\n print(\"There are %d different stairs that can be build from %d cubes.\" % (amount_of_stairs(n), n))\n break", "def prepare_to_advance(self):\n\n self.capture_user_input()\n self.UI.reset_figure()\n # stopping the blocking event loop\n self.fig.canvas.stop_event_loop()", "def get_input(self):\n while True:\n try:\n self.rows = int(input(\"Number of rows: \"))\n while self.rows < 2 or self.rows > 30:\n self.rows = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.cols = int(input(\"Number of columns: \"))\n while self.cols < 2 or self.cols > 30:\n self.cols = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.mines = int(input(\"Number of mines: \"))\n while self.mines < 1 or (self.mines >= self.rows * self.cols):\n tile_count = self.rows * self.cols\n self.mines = int(input(\"Please enter a number between 1 and \" + str(tile_count - 1) + \": \"))\n break\n except ValueError:\n print(\"Please enter a number!\")", "def start(self):\n self.running = True\n while self.running:\n self.update_prompt()\n with exception_handler(self.engine):\n self.cmdloop()\n self.engine.reset()", "def main():\n\n while True:\n print(\"Let's explore some US bikeshare data!\")\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # printing filter\n print(f\"Month: {month}, Day: {day}\")\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_records(df)\n restart = prompts.yes_no_prompt(\"\\nWould you like to 
restart?\\n\").launch()\n if not restart:\n break\n system(\"clear\")", "def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)", "def test_try():\n numb = input(\"Enter a number\")\n print(type(numb))\n try:\n num = float(numb)\n print(num)\n except Exception as e: # if there is exception,we wont crash,we will catch it\n print(e)\n print(\"Exiting the program\")", "def main():\n while True:\n primary_message() # <=== Display menu and take input\n x = choice(9)\n z = tempConv\n\n if x == 1:\n # This is the From Celsius options\n t = value_check(\"C\", tempConv.cel_ran)\n secondary_message(t, \"C\")\n y = choice(9, 1)\n\n while True:\n if y == 2:\n t2 = z.cel_fah(t) # <=== Fahrenheit\n result_message(t, t2, \"C\", \"F\")\n break\n elif y == 3:\n t2 = z.cel_kel(t) # <=== Kelvin\n result_message(t, t2, \"C\", \"K\")\n break\n elif y == 4:\n t2 = z.cel_ran(t) # <=== Rankin\n result_message(t, t2, \"C\", \"R\")\n break\n elif y == 5:\n t2 = z.cel_del(t) # <=== Delisle\n result_message(t, t2, \"C\", \"De\")\n break\n elif y == 6:\n t2 = z.cel_new(t) # <=== Newton\n result_message(t, t2, \"C\", \"N\")\n break\n elif y == 7:\n t2 = z.cel_rea(t) # <=== Reaumur\n result_message(t, t2, \"C\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.cel_rom(t) # <=== Romer\n result_message(t, t2, \"C\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 2:\n t = value_check(\"F\", tempConv.fah_ran)\n secondary_message(t, \"F\")\n y = choice(9, 2)\n\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, \"F\", \"C\")\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, \"F\", \"K\")\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, \"F\", \"R\")\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, \"F\", \"De\")\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, \"F\", \"N\")\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, \"F\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, \"F\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 3:\n t = value_check(\"K\", tempConv.kel_ran)\n secondary_message(t, \"K\")\n y = choice(9, 3)\n\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, \"K\", \"C\")\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, \"K\", \"F\")\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, \"K\", \"R\")\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, \"K\", \"De\")\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, \"K\", \"N\")\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, \"K\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, \"K\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 4:\n t = value_check(\"R\", tempConv.ran_rea)\n secondary_message(t, \"R\")\n y = choice(9, 4)\n\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, \"R\", \"C\")\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, \"R\", \"F\")\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, \"R\", \"K\")\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, \"R\", \"De\")\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, \"R\", \"N\")\n break\n elif y == 7:\n t2 = 
z.ran_rea(t)\n result_message(t, t2, \"R\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, \"R\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 5:\n t = value_check(\"De\", tempConv.del_ran)\n secondary_message(t, \"De\")\n y = choice(9, 5)\n\n while True:\n\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, \"De\", \"C\")\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, \"De\", \"F\")\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, \"De\", \"K\")\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, \"De\", \"R\")\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, \"De\", \"N\")\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, \"De\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, \"De\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 6:\n t = value_check(\"N\", tempConv.new_ran)\n secondary_message(t, \"N\")\n y = choice(9, 6)\n\n while True:\n\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, \"N\", \"C\")\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, \"N\", \"F\")\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, \"N\", \"K\")\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, \"N\", \"R\")\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, \"N\", \"N\")\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, \"N\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, \"N\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 7:\n t = value_check(\"R\\u00e9\", tempConv.rea_ran)\n secondary_message(t, \"R\\u00e9\")\n y = choice(9, 7)\n\n while True:\n\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, \"R\\u00e9\", \"C\")\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, \"R\\u00e9\", \"F\")\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, \"R\\u00e9\", \"K\")\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, \"R\\u00e9\", \"R\")\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, \"R\\u00e9\", \"De\")\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, \"R\\u00e9\", \"N\")\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, \"R\\u00e9\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 8:\n t = value_check(\"R\\u00f8\", tempConv.rom_ran)\n secondary_message(t, \"R\\u00f8\")\n y = choice(9, 8)\n\n while True:\n\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, \"R\\u00f8\", \"C\")\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, \"R\\u00f8\", \"F\")\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, \"R\\u00f8\", \"K\")\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, \"R\\u00f8\", \"R\")\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, \"R\\u00f8\", \"De\")\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, \"R\\u00f8\", \"N\")\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, \"R\\u00f8\", \"R\\u00e9\")\n break\n elif y == 9:\n break\n\n elif x == 9:\n print(Fore.CYAN + \"\\n Goodbye!\" + Fore.RESET)\n i = 0\n break", "def askformove(b):\n while True:\n print(b)\n userInput = input(\"enter your move \")\n try:\n userInput= int(userInput)\n assert(userInput <= b.width )\n assert(b.allowsMove(userInput))\n except (ValueError,AssertionError):\n print(\"enter a diff 
move\")\n continue\n return userInput", "def main():\n while True:\n city, month, day = get_filters()\n\n df = load_data(city, month, day)\n\n time_stats(df, city, month, day)\n station_stats(df, city)\n trip_duration_stats(df, city)\n # The city of washington does not provide user statistics\n if city != \"washington\":\n user_stats(df, city)\n\n sample = input(\n \"\\nIf you would like a sample of the raw date, enter 'yes' ===> \"\n )\n if sample.lower() == \"yes\":\n review_data(df)\n\n restart = input(\"\\nEnter 'yes' if you would like to restart ===> \")\n if restart.lower() != \"yes\":\n break", "def start(self):\n while True:\n import traceback\n try:\n command, args = self.parse_line(input(self.prompt))\n if not command:\n self.refresh()\n if command == \"exit\" or command == \"quit\":\n break\n command_handler = self.get_command_handler(command)\n command_handler(args)\n self.refresh()\n except KeyboardInterrupt:\n print(\"\")\n except AttributeError:\n pass\n except:\n traceback.print_exc()\n pass", "def run(self):\n self.initialise()\n self.setup_disks()\n self.solve_puzzle()\n input('Finished. Press ENTER to exit.')" ]
[ "0.68585896", "0.68095475", "0.6770353", "0.65708673", "0.6512728", "0.6459991", "0.6366776", "0.6324164", "0.62918556", "0.6262099", "0.6262099", "0.62162334", "0.6207645", "0.6178404", "0.6168539", "0.61427873", "0.60846925", "0.60826176", "0.6050576", "0.60469836", "0.6045079", "0.60230464", "0.60014015", "0.5992997", "0.59754676", "0.59628123", "0.5949667", "0.5921821", "0.5917723", "0.5896875", "0.586957", "0.5861052", "0.5848863", "0.5844181", "0.5838214", "0.58156985", "0.581269", "0.5800177", "0.57928693", "0.5786384", "0.5781061", "0.5777854", "0.57700247", "0.5750574", "0.57330906", "0.57329845", "0.57281166", "0.571694", "0.5703866", "0.56979907", "0.5695928", "0.56951845", "0.5691683", "0.56894016", "0.56801385", "0.56722426", "0.56707346", "0.56695276", "0.56559336", "0.56543195", "0.5643169", "0.56427604", "0.5636774", "0.5632529", "0.56310934", "0.5622912", "0.5620662", "0.5612905", "0.56023264", "0.5593757", "0.5584884", "0.55812544", "0.5577268", "0.55705774", "0.5558092", "0.55535775", "0.5549236", "0.5549226", "0.5545194", "0.5544741", "0.55339354", "0.55329514", "0.5531201", "0.5525836", "0.5524823", "0.5523423", "0.5522263", "0.5515789", "0.5510221", "0.5508498", "0.5504116", "0.55011964", "0.54913753", "0.5487325", "0.5484855", "0.54823786", "0.54762965", "0.5473257", "0.5472724", "0.5468896", "0.5467463" ]
0.0
-1
Create an instance to manage the blob properties extraction.
def Instance(self):
    if self.__Instance is None:
        self.__Instance = Properties()
    return self.__Instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, blob=None):\n if blob is None:\n self.versionCode = '0'\n self.data = {}\n else:\n self.versionCode = blob[0]\n encoded = blob[1:]\n compressed = base64.b64decode(encoded)\n self.data = json.loads(zlib.decompress(compressed))", "def __init__(__self__, *,\n endpoint_type: pulumi.Input[str],\n blob_container_name: Optional[pulumi.Input[str]] = None,\n resource_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"endpoint_type\", 'StorageBlob')\n if blob_container_name is not None:\n pulumi.set(__self__, \"blob_container_name\", blob_container_name)\n if resource_id is not None:\n pulumi.set(__self__, \"resource_id\", resource_id)", "def _constructInstance(self, container, id, *args, **kw):\n file, title = None, ''\n id = container.manage_addProduct['OFSP'].manage_addFile(id, file, title)\n return container.get(id, None)", "def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'", "def __init__(self, properties):\n #: Content properties\n #:\n #: :type: dict[str, str|dict]\n self.properties = properties", "def __init__(self):\n self.swagger_types = {\n 'status': 'str',\n 'download_url': 'str',\n 'download_id': 'str',\n 'message': 'str',\n 'number_of_pages': 'int',\n 'validation_errors': 'str'\n }\n\n self.attribute_map = {\n 'status': 'status',\n 'download_url': 'download_url',\n 'download_id': 'download_id',\n 'message': 'message',\n 'number_of_pages': 'number_of_pages',\n 'validation_errors': 'validation_errors'\n }\n\n self._status = None\n self._download_url = None\n self._download_id = None\n self._message = None\n self._number_of_pages = None\n self._validation_errors = None", "def __init__(self, config_filename, machine_config_filename=\"/usr/sap/backup/backup.conf\"):\n self.db_config_file = BackupConfigurationFile(filename=config_filename)\n self.machine_config_file = BackupConfigurationFile(filename=machine_config_filename)\n self.instance_metadata = AzureVMInstanceMetadata.create_instance()\n self._block_blob_service = None", "def __init__(self):\n\n try:\n # read attributes from attributes file\n with open(const.Storage.ATTRIBUTES) as attributes_file:\n # read the file and parse it to JSON data\n json_data = attributes_file.read()\n attributes = json.loads(json_data)\n\n # set attributes\n self.id = str(attributes[\"id\"])\n self.length = float(attributes[\"length\"])\n self.width = float(attributes[\"width\"])\n except OSError:\n raise OSError(\"The attributes file could not be opened.\")", "def __init__(self):\n\n # open json config file that reads in information\n config_path = open(\"config.json\", \"r\")\n config_json = config_path.read()\n config_dict = json.loads(config_json)\n\n # assign object variables\n self.project_id = config_dict[\"project-id\"]\n self.bucket_name = config_dict[\"bucket-name\"]\n self.location_id = config_dict[\"key-location\"]\n self.key_ring_id = config_dict[\"key-ring-id\"]\n self.crypto_key_id = config_dict[\"crypto-key-id\"]\n self.service_account_email = config_dict[\"service-account-email\"]\n\n # close the file\n config_path.close()", "def create(cls, _):\n return cls", "def __init__(self, metadata_folder='./'):\n self.metadata = self.load_metadata(metadata_folder)\n self.prefix = 'data/adience/faces/'\n return", "def __init__(self):\n 
super(MemoryPersistence, self).__init__(descriptor)", "def _constructInstance(self, container, id, *args, **kw):\n file, title = None, ''\n id = container.manage_addProduct['OFSP'].manage_addImage(id, file, title)\n return container.get(id, None)", "def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')", "def __init__(self):\n self.CVE_BUCKET = os.environ.get(\"REPORT_BUCKET_NAME\", '')\n self.AWS_KEY = os.environ.get(\"AWS_S3_ACCESS_KEY_ID_REPORT_BUCKET\", '')\n self.AWS_SECRET = os.environ.get(\"AWS_S3_SECRET_ACCESS_KEY_REPORT_BUCKET\", '')\n self.AWS_REGION = os.environ.get(\"AWS_S3_REGION\", \"us-east-1\")\n self.HOST = os.environ.get('BAYESIAN_DATA_IMPORTER_SERVICE_HOST', 'bayesian-data-importer')\n self.PORT = os.environ.get('BAYESIAN_DATA_IMPORTER_SERVICE_PORT', '9192')\n\n self.s3_resource = boto3.resource('s3', aws_access_key_id=self.AWS_KEY,\n aws_secret_access_key=self.AWS_SECRET,\n region_name=self.AWS_REGION)", "def __init__(self, metadata_folder='./'):\n self.metadata = self.load_metadata(metadata_folder)\n self.prefix = 'data/miap/images/'\n return", "def new_blob(self, blob_name):\n return storage.Blob(blob_name, self.bucket)", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'catalog_id': 'str',\n 'uri': 'str',\n 'job_type': 'str',\n 'lifecycle_state': 'str',\n 'is_sample_data_extracted': 'bool',\n 'time_created': 'datetime'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'catalog_id': 'catalogId',\n 'uri': 'uri',\n 'job_type': 'jobType',\n 'lifecycle_state': 'lifecycleState',\n 'is_sample_data_extracted': 'isSampleDataExtracted',\n 'time_created': 'timeCreated'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._catalog_id = None\n self._uri = None\n self._job_type = None\n self._lifecycle_state = None\n self._is_sample_data_extracted = None\n self._time_created = None", "def setUpClass(cls):\n\n cls.s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n cls.direct_expression = cls._read_expression_direct()\n cls.direct_cell = cls._read_cell_direct()", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n 
self._duration = None\n self._messages = None\n self._tags = None", "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def __init__(self, name=None, properties=None):\n\n super().__init__()\n self._netlist = None\n self._definitions = list()\n _call_create_library(self)\n if name != None:\n self.name = name\n\n if properties != None:\n assert isinstance(\n properties, dict), \"properties must be a dictionary\"\n for key in properties:\n self[key] = properties[key]", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'identifier': 'int',\n 'success': 'bool',\n 'description': 'str',\n 'duration': 'float',\n 'bag_name': 'str',\n 'bag_store_name': 'str',\n 'results': 'object',\n 'bag': 'BagSummary'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'identifier': 'identifier',\n 'success': 'success',\n 'description': 'description',\n 'duration': 'duration',\n 'bag_name': 'bag_name',\n 'bag_store_name': 'bag_store_name',\n 'results': 'results',\n 'bag': 'bag'\n }\n\n self._detail_type = None\n self._identifier = None\n self._success = None\n self._description = None\n self._duration = None\n self._bag_name = None\n self._bag_store_name = None\n self._results = None\n self._bag = None", "def __init__(__self__, *,\n archive_name_format: Optional[pulumi.Input[str]] = None,\n blob_container: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n storage_account: Optional[pulumi.Input['EventhubSpecPropertiesCaptureDescriptionDestinationStorageAccountArgs']] = None):\n if archive_name_format is not None:\n pulumi.set(__self__, \"archive_name_format\", archive_name_format)\n if blob_container is not None:\n pulumi.set(__self__, \"blob_container\", blob_container)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if storage_account is not None:\n pulumi.set(__self__, \"storage_account\", storage_account)", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def init_instance(self, prop):\n out_prop = copy.deepcopy(prop)\n out_prop[\"instance\"] = prop[\"vm_name\"]\n return dict(cloud=out_prop)", "def __init__(self, metadata, environment, component, image, version, s3_bucket, exec_env):\n self.environment = environment\n self.component = component\n self.s3_bucket = s3_bucket\n self.exec_env = exec_env\n self.image = image\n self.version = version\n self.metadata = metadata\n\n # generate Terragrunt config as part of object initialisation\n self.config()", "def __init__(\n self,\n response: dict\n ):\n\n self.__name = read_value(\n \"name\", response, str, True)\n self.__uuid = read_value(\n \"uuid\", response, str, True)\n 
self.__note = read_value(\n \"note\", response, str, True)\n self.__location = read_value(\n \"location\", response, str, True)\n self.__datacenter_uuid = read_value(\n \"datacenter.uuid\", response, str, True)\n self.__row_uuids = read_value(\n \"rows.uuid\", response, str, False)\n self.__row_count = read_value(\n \"rowCount\", response, int, True)\n self.__rack_count = read_value(\n \"rackCount\", response, int, True)\n self.__host_count = read_value(\n \"hostCount\", response, int, True)", "def __init__(self):\n\n self.reader = reader.Reader()", "def new(self):\n\n self.obj = self.factory()\n\n if self.textproperty is None:\n self.attributes = ElementHandler.load_definitions(self, self.obj)", "def create_storage(conf):\n _name = conf.get(\"name\", \"\")\n _cls = importer(conf['class'])\n _kwargs = conf['kwargs']\n _io = importer(_kwargs['io_class'])\n return _cls(_kwargs[\"storage_config\"], name=_name, io_class=_io)", "def __init__(self):\n\n self.storage: list = Storage()\n\n # Start for get data in API and set in storage\n self._set_proxies_in_storage()", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'error_entity': 'DomainEntityRef',\n 'related_entity': 'DomainEntityRef',\n 'timestamp': 'datetime',\n 'level': 'str',\n 'category': 'str',\n 'correlation_id': 'str',\n 'event_message': 'EventMessage',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'error_entity': 'errorEntity',\n 'related_entity': 'relatedEntity',\n 'timestamp': 'timestamp',\n 'level': 'level',\n 'category': 'category',\n 'correlation_id': 'correlationId',\n 'event_message': 'eventMessage',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._error_entity = None\n self._related_entity = None\n self._timestamp = None\n self._level = None\n self._category = None\n self._correlation_id = None\n self._event_message = None\n self._self_uri = None", "def create(cls):\n pass\n return cls()", "def __init__(self):\n\n self.logger = utils.get_logger()\n\n # set constants\n constants = models.get_asset_dicts('preferences')\n for key, value in constants.items():\n setattr(self, key, value)", "def __init__(self,imageObject):\n getJsonEnv()\n self.meta=imageObject\n self.imageNames=self.getImageName()\n self.gcsBucket=self.setUpCredentials()\n self.sources = []", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self):\n self.properties = {}", "def __init__(self, buff):\n fmt = '[iSi] [hS [hii [i] [i] ] ]'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n broker_info, topics = response\n\n self.brokers = {}\n for (id_, host, port) in broker_info:\n self.brokers[id_] = BrokerMetadata(id_, host, port)\n\n self.topics = {}\n for (err, name, partitions) in topics:\n part_metas = {}\n for (p_err, id_, leader, replicas, isr) in partitions:\n part_metas[id_] = PartitionMetadata(id_, leader, replicas,\n isr, p_err)\n self.topics[name] = TopicMetadata(name, part_metas, err)", "def __init__(self, conn, name, alias, **kwargs):\n super().__init__()\n\n self._alias = alias\n self._name = name\n self._properties = kwargs\n\n lv_pool = conn.storagePoolLookupByName(self.pool)\n lv_pool.createXML(\n self._volume_xml(),\n libvirt.VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA\n )\n lv_volume = lv_pool.storageVolLookupByName(self.name)\n self._path = lv_volume.path()\n\n self._xml = ElementTree.Element('disk')\n self._xml.attrib['type'] = 'file'\n self._xml.attrib['device'] = 'disk'\n driver_element = ElementTree.Element('driver')\n 
driver_element.attrib['name'] = 'qemu'\n driver_element.attrib['type'] = 'qcow2'\n self._xml.append(driver_element)\n target_element = ElementTree.Element('target')\n target_element.attrib['dev'] = self.target\n target_element.attrib['bus'] = 'virtio'\n self._xml.append(target_element)\n source_element = ElementTree.Element('source')\n source_element.attrib['file'] = self.path\n self._xml.append(source_element)\n alias_element = ElementTree.Element('alias')\n alias_element.attrib['name'] = 'virtio-%s' % self.alias\n self._xml.append(alias_element)\n\n LOG.debug(\"Define virtual disk %s (%s bytes)\", self.name, self.capacity)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'channel_id': 'str',\n 'channel_secret': 'str',\n 'switcher_secret': 'str',\n 'service_code': 'str',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'channel_id': 'channelId',\n 'channel_secret': 'channelSecret',\n 'switcher_secret': 'switcherSecret',\n 'service_code': 'serviceCode',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._channel_id = None\n self._channel_secret = None\n self._switcher_secret = None\n self._service_code = None\n self._self_uri = None", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def __init__(self, name: str, **properties: str):\n super().__init__(name, **properties)\n self.uri = properties[URI]\n self._fetch_config()\n self._session = self._create_session()", "def __init__(self, api=None, properties=None):\n if not api is None:\n self.api = api", "def __init__(self):\n self.config = configs.Configuration()\n self.log = logger.CustomLogger(__name__).get_logger()\n self.output_dir = self.config.getConfigValue('OUTPUT_DIR')\n self.s3_directory = self.config.getConfigValue('S3_FILE_PATH_TRANSFORM')", "def __init__(self, *properties):\n self._properties = properties", "def __init__(self, commit_hash=None, distro_hash=None, extended_hash=None,\n aggregate_hash=None, promote_name=None, timestamp=None,\n user=None, repo_hash=None, repo_url=None, component=None):\n self.swagger_types = {\n 'commit_hash': 'str',\n 'distro_hash': 'str',\n 'extended_hash': 'str',\n 'aggregate_hash': 'str',\n 'promote_name': 'str',\n 'timestamp': 'int',\n 'user': 'str',\n 'repo_hash': 'str',\n 'repo_url': 'str',\n 'component': 'str',\n }\n\n self.attribute_map = {\n 'commit_hash': 'commit_hash',\n 'distro_hash': 'distro_hash',\n 'extended_hash': 'extended_hash',\n 'aggregate_hash': 'aggregate_hash',\n 'promote_name': 'promote_name',\n 'timestamp': 'timestamp',\n 'user': 'user',\n 'repo_hash': 'repo_hash',\n 'repo_url': 'repo_url',\n 'component': 'component',\n }\n\n self._commit_hash = commit_hash\n self._distro_hash = distro_hash\n self._extended_hash = extended_hash\n self._aggregate_hash = aggregate_hash\n self._promote_name = promote_name\n self._timestamp = timestamp\n self._user = user\n self._repo_hash = repo_hash\n self._repo_url = repo_url\n self._component = component", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'device_token': 'str',\n 'notification_id': 'str',\n 'make': 'str',\n 'model': 'str',\n 'accept_notifications': 'bool',\n 'type': 'str',\n 'session_hash': 'str',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'device_token': 'deviceToken',\n 'notification_id': 'notificationId',\n 'make': 'make',\n 'model': 'model',\n 'accept_notifications': 'acceptNotifications',\n 'type': 'type',\n 'session_hash': 
'sessionHash',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._device_token = None\n self._notification_id = None\n self._make = None\n self._model = None\n self._accept_notifications = None\n self._type = None\n self._session_hash = None\n self._self_uri = None", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self):\n\n self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def __init__(self, project_id, creds_file, bucket_name, location=\"us-west1\", storage_prefix=None):\n self.projectId = project_id\n self.location = location\n self.productClient = vision.ProductSearchClient.from_service_account_json(creds_file)\n self.imageClient = vision.ImageAnnotatorClient.from_service_account_file(creds_file)\n self.locationPath = self.productClient.location_path(project=project_id, location=location)\n self.storageClient = storage.Client.from_service_account_json(creds_file)\n self.bucket = self.storageClient.bucket(bucket_name)\n self.prefix = storage_prefix", "def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}", "def __init__(self, attachment_name_property=None, attachment_type=None):\n self.__attachment_name = None\n self.__attachment_name_property = attachment_name_property\n self.__attachment_type = self._gk(attachment_type)", "def __init__(__self__, *,\n instance: pulumi.Input[str],\n charset: Optional[pulumi.Input[str]] = None,\n collation: Optional[pulumi.Input[str]] = None,\n deletion_policy: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"instance\", instance)\n if charset is not None:\n pulumi.set(__self__, \"charset\", charset)\n if collation is not None:\n pulumi.set(__self__, \"collation\", collation)\n if deletion_policy is not None:\n pulumi.set(__self__, \"deletion_policy\", deletion_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "def __init__(self):\n\n self._base_url = \"\"\n self._hook = hooks.TransactionHook()\n self._client = clients.DEFAULT_CLIENT\n self._converter_factories = collections.deque()\n self._converter_factories.append(converter.StandardConverterFactory())", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def __init__(self, wm) -> None:\n conf_dict = wm.context.config.arango_storage._to_dict()\n\n log.debug(conf_dict)\n client = ArangoClient(hosts=conf_dict['hosts'])\n db = client.db(conf_dict['database'],\n username=conf_dict['username'],\n password=conf_dict['password'])\n\n self.db = db\n self.client = client", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def __init__(self):\n self.reader = vtk.vtkImageData()\n\n self.dims = self.reader.GetDimensions()\n self.bounds = self.reader.GetBounds()\n self.spacing = self.reader.GetSpacing()\n self.origin = self.reader.GetOrigin()\n self.value_range = self.reader.GetScalarRange()\n\n # self.plane_widget_x = vtk.vtkImagePlaneWidget()\n # self.plane_widget_y = vtk.vtkImagePlaneWidget()\n # self.plane_widget_z = vtk.vtkImagePlaneWidget()\n\n self.flag_read = False", "def __repr__(self):\r\n return \"IAMLTools.BlobProperties object.\"", "def __init__(self):\n self.package_name = 'pareceres'\n 
self.repository = config.get('ckanext.datadaemon.repository')\n self.file_url = None\n self.es_instance = es.ESIntegration()", "def CreateBlob(self, blob_key, blob):\n self._blobs[blobstore.BlobKey(unicode(blob_key))] = blob", "def __init__(self, config):\n \n self.gcs_client = storage.Client()\n self.data = pd.DataFrame()\n\n self.serie_name = config['serie']\n self.start_date = config['start_date']\n self.end_date = config['end_date']\n self.path = config['raw_path']\n self.bucket = config['bucket']", "def __init__(self):\n PrimaryObject.__init__(self)\n NoteBase.__init__(self)\n AddressBase.__init__(self)\n UrlBase.__init__(self)\n self.type = RepositoryType()\n self.name = \"\"", "def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n self._disabled = None\n self._id = None", "def createWrapper():\n\n # read properties file and get MANO name and IP\n config = RawConfigParser()\n config.read(\"../../coreMano/coreMano.properties\")\n name = config.get(\"CoreMano\", \"coreMano.name\")\n host_ip = config.get(\"CoreMano\", \"coreMano.ip\")\n\n # instanciate and return the MANO\n if name == \"osm\":\n mano = OsmWrapper(name, host_ip)\n if name == \"cloudify\":\n mano = CloudifyWrapper(name, host_ip)\n return mano", "def __init__(self,\n application_parameters=None,\n enable_system_backup=None,\n file_paths=None,\n metadata_file_path=None,\n skip_nested_volumes_vec=None,\n uses_skip_nested_volumes_vec=None,\n volume_guid=None,\n windows_parameters=None,\n ):\n\n # Initialize members of the class\n self.application_parameters = application_parameters\n self.enable_system_backup = enable_system_backup\n self.file_paths = file_paths\n self.metadata_file_path = metadata_file_path\n self.skip_nested_volumes_vec = skip_nested_volumes_vec\n self.uses_skip_nested_volumes_vec = uses_skip_nested_volumes_vec\n self.volume_guid = volume_guid\n self.windows_parameters = windows_parameters", "def __init__(self):\n\n # the path to the file locally\n self.path = None\n # the file extension\n self.ext = None\n # image|video\n self.type = None\n ##\n # file title reference\n self.title = None\n # [image, gallery, video, performer]\n self.category = None\n # file size\n self.size = None", "def __init__(self, detector):\n self.base_dir = os.path.join(os.getcwd(), cfg.local[\"BASE_DB\"])\n self.images_dir = os.path.join(self.base_dir, cfg.local[\"IMG_DIR\"])\n self.X_filename = os.path.join(self.base_dir, cfg.data[\"X_NAME\"])\n self.y_filename = os.path.join(self.base_dir, cfg.data[\"y_NAME\"])\n self.le_filename = os.path.join(self.base_dir, cfg.models[\"LE_NAME\"])\n self.detector = detector\n\n if not os.path.exists(self.base_dir):\n os.mkdir(self.base_dir)\n\n if not os.path.exists(self.images_dir):\n os.mkdir(self.images_dir)\n\n #Load basic information here\n self.__initDataFromImages() #Init before load\n 
self.__loadPreProcessedData()", "def make_instance(cls):\r\n def get_value(name):\r\n if name in attributes:\r\n return attributes[name]\r\n else:\r\n value = cls['get'](name)\r\n return bind_method(value, instance)\r\n\r\n def set_value(name, value):\r\n attributes[name] = value\r\n\r\n attributes = {}\r\n instance = {'get': get_value, 'set': set_value}\r\n return instance", "def __init__(self, metadata_folder='./'):\n self.metadata = self.load_metadata(metadata_folder)\n self.prefix = 'data/CCD/frames/'\n return", "def __init__(self, filename=None):\r\n self._specs = self._load_spec(filename)\r\n self._properties = {}\r\n self._names = []\r\n self._defaults = {}", "def __init__(self):\n super(CatalogProxy, self).new_instance(\"catalog\", Catalog)", "def __init__(self):\n self.swagger_types = {\n 'cidr': 'str',\n 'service': 'str',\n 'region': 'str'\n }\n\n self.attribute_map = {\n 'cidr': 'cidr',\n 'service': 'service',\n 'region': 'region'\n }\n\n self._cidr = None\n self._service = None\n self._region = None", "def __init__(self, path):\n super().__init__()\n self.__cache = StorageCache()\n self.__db = InternalStorage(path)\n self.__serializer = Serializer()", "def __init__(self, name=None):\n\n conf = Config()[\"cloudmesh\"]\n super().__init__(name)\n\n self.user = Config()[\"cloudmesh\"][\"profile\"][\"user\"]\n self.spec = conf[\"cloud\"][name]\n self.cloud = name\n\n self.default = self.spec[\"default\"]\n self.cloudtype = self.spec[\"cm\"][\"kind\"]\n\n self.cred = self.spec[\"credentials\"]\n self.default = self.spec[\"default\"]\n self.project_id = self.cred[\"auth\"][\"project_id\"]\n\n # pprint(self.cred)\n\n self.cloudman = openstack.connection.Connection(**self.cred)\n\n # self.default_image = deft[\"image\"]\n # self.default_size = deft[\"size\"]\n # self.default.location = cred[\"datacenter\"]\n\n try:\n self.public_key_path = conf[\"profile\"][\"publickey\"]\n self.key_path = path_expand(\n Config()[\"cloudmesh\"][\"profile\"][\"publickey\"])\n f = open(self.key_path, 'r')\n self.key_val = f.read()\n except:\n raise ValueError(\"the public key location is not set in the \"\n \"profile of the yaml file.\")", "def __init__(self):\n\t\tcherrylog (\"Created Media Object of type: \" + str(self.__class__.__name__))\n\t\tself.mongo_connection = Connection() #This gets closed by the parent destructor\n\t\tself.bettermedia = self.mongo_connection['BetterMedia'] # `BetterMedia` database\n\t\tself.image_collection = self.bettermedia.image # `image` collection\n\t\tself.scene_collection = self.bettermedia.scene\n\t\tself.my_collection = self.bettermedia.video # `video` collection\n\t\tself.attributes = {} #Holds the important data for this object. 
This will be persisted to the Mongo DB.\n\t\tself.doc_id = \"\" #Holds a reference to the mongo document ID", "def __init__(self):\n self.swagger_types = {\n 'annotations': 'dict(str, str)',\n 'end_time': 'int',\n 'hosts': 'list[str]',\n 'is_ephemeral': 'bool',\n 'is_user_event': 'bool',\n 'name': 'str',\n 'start_time': 'int',\n 'summarized_events': 'int',\n 'table': 'str',\n 'tags': 'list[str]'\n }\n\n self.attribute_map = {\n 'annotations': 'annotations',\n 'end_time': 'endTime',\n 'hosts': 'hosts',\n 'is_ephemeral': 'isEphemeral',\n 'is_user_event': 'isUserEvent',\n 'name': 'name',\n 'start_time': 'startTime',\n 'summarized_events': 'summarizedEvents',\n 'table': 'table',\n 'tags': 'tags'\n }\n\n self._annotations = None\n self._end_time = None\n self._hosts = None\n self._is_ephemeral = False\n self._is_user_event = False\n self._name = None\n self._start_time = None\n self._summarized_events = None\n self._table = None\n self._tags = None", "def __init__(self):\n config = self.read_config()\n self.deployment = config['deployment']\n self.deployment_config = config[self.deployment]\n logger.info(f'Initializing storage client with the {self.deployment} deployment config {pformat(self.deployment_config)}')\n\n # get the MLOS config from the user else default it from the deployment config file\n # self.mlos_config = config['MLOS']\n # logger.info(f'Initializing storage client with the MLOS config {pformat(self.mlos_config)}')\n\n # setup the mount path\n if self.deployment == \"LOCAL\":\n self.mount_dir = self.setup_mount()\n logger.info(f'Mount directory setup completed: {self.mount_dir}')", "def create_init(cls):\n if cls.instance is None:\n cls.instance = Initializer()\n return cls.instance", "def BriefDescriptorExtractor_create(bytes=None, use_orientation=None): # real signature unknown; restored from __doc__\n pass", "def __init__(self, project):\n super(NovaExtractor, self).__init__(project)\n\n self.nova = self._get_nova_client()\n self.glance = self._get_glance_client()\n self.neutron = self._get_neutron_client()\n\n self.flavors = self._get_flavors()\n self.images = self._get_images()", "def new(self):\n self._init()", "def __init__(self, os_creds, keypair_settings):\n super(self.__class__, self).__init__(os_creds)\n\n self.keypair_settings = keypair_settings\n self.__delete_keys_on_clean = True\n\n # Attributes instantiated on create()\n self.__keypair = None", "def create_instance(self, cls, **kwargs):\n\n if hasattr(cls, \"from_hdf_args\"):\n init_args = cls.from_hdf_args(self)\n else:\n init_args = {}\n\n init_args.update(kwargs)\n\n return cls(**init_args)", "def __init__(self):\n self.s3_resource = boto3.resource('s3')\n self.s3_client = boto3.client('s3')", "def __init__(self, rdc):\n EphemeralProcessing.__init__(self, rdc)\n # Create the storage interface to store the exported resources\n self.storage_interface = rdc.create_storage_interface()", "def __init__(self):\n AUTHORIZATION_SCOPES = [\n 'https://www.googleapis.com/auth/devstorage.full_control']\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token_cloud_storage.pickle'):\n with open('token_cloud_storage.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n # If not successful, collecting new Token to access Calendar\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = 
InstalledAppFlow.from_client_secrets_file(\n 'credentials_google.json', AUTHORIZATION_SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open('token_cloud_storage.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # Instantiates a client\n self.storage_client = storage.Client(\n project=\"piot-assignment2-287110\", credentials=creds)\n\n # Connect to bucket on Cloud Storage\n self.bucket = self.storage_client.get_bucket(\"facial_img\")", "def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))", "def __init__(self, filename=None, image=None):\n self.image = image\n self.filename = filename\n self.metadata = Metadata()\n self.metadata['Convolved'] = False\n if self.image is None and filename is not None and os.path.exists(filename): # read the image from file\n self.from_file(filename)", "def __init__(self, name=None, file_origin=None, description=None, **annotations):\n # create `annotations` for additional arguments\n _check_annotations(annotations)\n self.annotations = annotations\n \n # these attributes are recommended for all objects.\n self.name = name\n self.description = description\n self.file_origin = file_origin", "def __init__(self, snapshot_metadata=None, flow_contents=None, external_controller_services=None, parameter_providers=None, parameter_contexts=None, flow_encoding_version=None, flow=None, bucket=None, latest=None):\n\n self._snapshot_metadata = None\n self._flow_contents = None\n self._external_controller_services = None\n self._parameter_providers = None\n self._parameter_contexts = None\n self._flow_encoding_version = None\n self._flow = None\n self._bucket = None\n self._latest = None\n\n self.snapshot_metadata = snapshot_metadata\n self.flow_contents = flow_contents\n if external_controller_services is not None:\n self.external_controller_services = external_controller_services\n if parameter_providers is not None:\n self.parameter_providers = parameter_providers\n if parameter_contexts is not None:\n self.parameter_contexts = parameter_contexts\n if flow_encoding_version is not None:\n self.flow_encoding_version = flow_encoding_version\n if flow is not None:\n self.flow = flow\n if bucket is not None:\n self.bucket = bucket\n if latest is not None:\n self.latest = latest", "def __init__(self):\n # try to load the container\n # cf will be global... 
\n # self.cf = pyrax.cloudfiles\n logging.debug(\"Opening cloudfiles container '%s'\" % self.container_name)\n notify(\"Reading environment configuration\")\n \n # check if our container exists; if not create it\n all_containers = cf.list_containers()\n \n if self.container_name in all_containers:\n logging.debug(\"Container exists, opening\")\n mycontainer = cf.get_container(self.container_name)\n else:\n logging.warn(\"Container doesn't exist, creating...\")\n mycontainer = cf.create_container(self.container_name)\n \n self.container = mycontainer\n \n if not self.load_footprints():\n logging.warn(\"No footprints loaded\")\n notify(\"No footprints found.\")", "def create_from_proposal(cls, proposal):\n obj = cls()\n obj.load_from_proposal(proposal)\n return obj", "def __init__(self):\n super(EventDataStream, self).__init__()\n self.file_entropy = None\n self.md5_hash = None\n self.path_spec = None\n self.sha1_hash = None\n self.sha256_hash = None\n self.yara_match = None", "def __new__(cls, manager, device_config, log_file_name, log_directory):\n # slowly migrate away from using 'hub_port_name' but maintain backwards compatibility\n if \"console_port_name\" not in device_config[\"persistent\"]:\n device_config[\"persistent\"][\"console_port_name\"] = \\\n device_config[\"persistent\"][\"hub_port_name\"]\n\n identifier = device_config[\"persistent\"][\"console_port_name\"]\n if identifier not in cls._instances:\n obj = super(Cambrionix, cls).__new__(cls)\n cls._instances[identifier] = obj\n\n return cls._instances[identifier]", "def init(self) -> None:", "def __init__(\n self, datetime,\n provider, asset_license,\n ext_properties\n ):\n self.ext_properties = ext_properties\n self.license = asset_license\n self.provider = provider\n self.datetime = datetime", "def __init__(self, name, attr=None):\n self.name = name\n self.propertiesstr = attr" ]
[ "0.6232803", "0.5880269", "0.58206016", "0.5818862", "0.5719443", "0.57131475", "0.5703017", "0.56779015", "0.5674141", "0.5668743", "0.5663582", "0.5659485", "0.565841", "0.56503475", "0.56440836", "0.5627367", "0.5611333", "0.5609648", "0.5596115", "0.5574799", "0.55605406", "0.55592597", "0.555619", "0.5555436", "0.55542076", "0.5533107", "0.55165255", "0.5513116", "0.55085367", "0.54995817", "0.54962677", "0.5476128", "0.54736406", "0.54720145", "0.54565775", "0.5454166", "0.54464686", "0.54459983", "0.543571", "0.54300565", "0.5429287", "0.54258484", "0.5408783", "0.5402256", "0.5399371", "0.5398129", "0.53792197", "0.5366818", "0.5356801", "0.535077", "0.5350457", "0.53440416", "0.534117", "0.53307444", "0.5320333", "0.5318911", "0.5317335", "0.53133935", "0.5309356", "0.5309356", "0.5293924", "0.5285119", "0.52823234", "0.52685994", "0.5267975", "0.52657574", "0.5264306", "0.52608275", "0.5255494", "0.5254786", "0.52492553", "0.52487135", "0.52474946", "0.5245092", "0.5244308", "0.523837", "0.52368104", "0.52355874", "0.5233203", "0.52313733", "0.5230107", "0.52291304", "0.5215358", "0.5214537", "0.52092737", "0.5205007", "0.5204471", "0.52037644", "0.520207", "0.5187003", "0.5183301", "0.5182033", "0.5180886", "0.5180324", "0.5179166", "0.51786417", "0.5178467", "0.5177327", "0.5176911", "0.5175128", "0.51727265" ]
0.0
-1
This constructor is never used by the system.
def __init__(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated!\")", "def __init__ (self):\n pass", "def __init__(self):\n raise NotImplementedError()", "def __init__(self):\r\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self):\n raise Exception(\"Cannot create this object\")", "def __init__(self):\n raise Exception(\"Cannot create this object\")", "def __init__():", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self):\n raise", "def __init__(self):\n raise NoInitiation", "def __init__(self, **kwds):\n raise NotImplementedError", "def init(self) -> None:", "def __init__(self):\n raise Exception('TODO IMPLEMENT ME !')", "def _init(self):\n raise NotImplementedError", "def __init__ (self) :", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self) -> None:", "def __init__(self) -> None:", "def __init__(self, **kwargs):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n raise NotImplementedError()", "def _init(self):\n pass", "def __init__(self):\r\n return", "def __init__(self):\r\n return", "def __init__(self):\r\n return", "def __init__(self):\r\n return", "def __init__(self):\n\n pass", "def __init__(self):\n\n pass", "def __init__(self):\n\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def initialize(self):\n\t\tpass", "def _init(self):", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass" ]
[ "0.82291555", "0.82291555", "0.82291555", "0.82291555", "0.8227341", "0.8077878", "0.8016458", "0.8003538", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79657507", "0.79415816", "0.79380023", "0.79380023", "0.79170954", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7872846", "0.7755447", "0.7703378", "0.76958543", "0.7682624", "0.7640777", "0.7619776", "0.76045203", "0.7603876", "0.75773144", "0.75773144", "0.7520182", "0.75017416", "0.7498635", "0.7492118", "0.7492118", "0.7492118", "0.7492118", "0.7439757", "0.7439757", "0.7439757", "0.7377193", "0.7377193", "0.7354847", "0.7354847", "0.7354847", "0.7354847", "0.7354847", "0.7354531", "0.73381674", "0.73274004", "0.73274004", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918", "0.7286918" ]
0.73578566
63
Gets a string representation of the object.
def __repr__(self): return "IAMLTools.BlobProperties object."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_str(self, obj):\n if self.pretty:\n return pprint.pformat(obj)\n else:\n return str(obj)", "def objectToString(obj):\n if (hasattr(obj, \"__iter__\")):\n # matrix or vector\n if len(obj) == 0:\n return \"\"\n else:\n if (hasattr(obj[0], \"__iter__\")):\n # matrix\n return matrixToString(obj)\n else:\n # vector\n return tupleToString(obj)\n elif hasattr(obj, 'name'):\n return obj.name\n else:\n return str(obj)", "def str_(object_):\n return str(object_)", "def __str__(self):\n return str(self.__dict__['_obj'])", "def stringify(obj):\n tp = type(obj)\n if issubclass(tp, basestring):\n return obj\n elif hasattr(tp, '__unicode__'):\n s = tp.__unicode__(obj)\n if not isinstance(s, basestring):\n raise TypeError('__unicode__ did not return a string')\n return s\n elif hasattr(tp, '__str__'):\n s = tp.__str__(obj)\n if not isinstance(s, basestring):\n raise TypeError('__str__ did not return a string')\n return s\n else:\n return str(obj)", "def __str__(self):\n return str(self.obj)", "def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj", "def obj_to_str(obj):\n try:\n # Automatically return repr() for a certain types to avoid going through the whole function\n # and then returning the repr() right at the end.\n repr_types = (str, astropy.io.fits.header.Header)\n # Ensure FITS headers are just treated as string representations rather than iterables\n except NameError:\n repr_types = str\n if isinstance(obj, repr_types):\n # Default cases\n return repr(obj)\n\n try:\n # Convert to give same string for 1, 1.0, 1.0000 etc.\n return repr(float(obj))\n except (TypeError, ValueError):\n pass\n\n if isinstance(obj, dict):\n # Normalise dict order by sorting\n obj = sorted([obj_to_str(k) + ': ' + obj_to_str(v) for k, v in obj.items()])\n return '{' + ', '.join(obj) + '}'\n\n if isinstance(obj, set):\n # Normalise set order by sorting, then treat as standard iterable\n obj = list(sorted(obj))\n\n try:\n try:\n # Try short circuit for all numeric case for performance reasons (this doesn't change\n # the output string, but can avoid repeated calling of obj_to_str() for e.g. 
numpy\n # arrays).\n return repr(list(map(float, obj)))\n except (TypeError, ValueError):\n pass\n # Treat all iterables the same and normalise contents\n return '[' + ', '.join([obj_to_str(x) for x in obj]) + ']'\n\n except TypeError:\n pass\n\n # Fallback to \"official\" string representation of object\n return repr(obj)", "def to_string(self):\r\n return self.__str__()", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return 
pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())" ]
[ "0.79434896", "0.76730764", "0.74159396", "0.7268582", "0.7246455", "0.7231229", "0.7168203", "0.710605", "0.7084541", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156", "0.70546156" ]
0.0
-1
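
The row above pairs a string-representation docstring with a one-line `__repr__`, and its negatives repeat the `pformat(self.to_dict())` idiom many times. A minimal, self-contained sketch of that idiom follows; the class name and fields here are hypothetical, not taken from any row:

from pprint import pformat

class Blob:
    """Toy object illustrating the to_str/__repr__ idiom repeated above."""

    def __init__(self, name, size):
        self.name = name  # hypothetical field
        self.size = size  # hypothetical field

    def to_dict(self):
        # Collect the instance attributes into a plain dict.
        return {"name": self.name, "size": self.size}

    def to_str(self):
        # Pretty-print the dict form, exactly as the negative snippets do.
        return pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

print(Blob("img.png", 1024))  # -> {'name': 'img.png', 'size': 1024}
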
Calculate and return a list of strings specified by properties.
def getContourProperties(self, contour, properties=[]):
    # Initial variables.
    failInInput = False
    props = {}

    for prop in properties:
        prop = str(prop).lower()

        # Dispatch each requested property name to its helper.
        if prop == "approximation":
            props.update({"Approximation": self.__CalculateApproximation(contour)})
        elif prop == "area":
            props.update({"Area": self.__CalculateArea(contour)})
        elif prop == "boundingbox":
            props.update({"BoundingBox": self.__CalculateBoundingBox(contour)})
        elif prop == "centroid":
            props.update({"Centroid": self.__CalculateCentroid(contour)})
        elif prop == "circle":
            props.update({"Circle": self.__CalculateCircle(contour)})
        elif prop == "circularity":
            props.update({"Circularity": self.__CalculateCircularity(contour)})
        elif prop == "convexhull":
            props.update({"ConvexHull": self.__CalculateConvexHull(contour)})
        elif prop == "extend":
            props.update({"Extend": self.__CalculateExtend(contour)})
        elif prop == "ellipse":
            props.update({"Ellipse": self.__CalculateEllipse(contour)})
        elif prop == "isconvex":
            props.update({"IsConvex": self.__IsConvex(contour)})
        elif prop == "length":
            props.update({"Length": self.__CalculateLength(contour)})
        elif prop == "moments":
            props.update({"Moments": self.__CalculateMoments(contour)})
        elif prop == "perimeter":
            props.update({"Perimeter": self.__CalculatePerimeter(contour)})
        elif prop == "rotatedbox":
            props.update({"RotatedBox": self.__CalculateRotatedBox(contour)})
        elif failInInput:
            # An unknown property was already reported once; stay quiet.
            pass
        else:
            print("\t--" * 20)
            print("\t*** PROPERTY ERROR " + prop + " DOES NOT EXIST ***")
            print("\tTHIS ERROR MESSAGE WILL ONLY BE PRINTED ONCE")
            print("\t--" * 20)
            failInInput = True

    return props
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getProperties(properties =['electrical_props', '__description'], \r\n sproperty ='electrical_props'):\r\n #------------------------------------\r\n from .database import GeoDataBase\r\n #-----------------------------------\r\n def _fs (v): \r\n \"\"\" Sanitize value and put on list \r\n :param v: value \r\n :Example:\r\n \r\n >>> _fs('(416.9, 100000.0)'))\r\n ...[416.9, 100000.0]\r\n \"\"\"\r\n try : \r\n v = float(v)\r\n except : \r\n v = tuple([float (ss) for ss in \r\n v.replace('(', '').replace(')', '').split(',')])\r\n return v\r\n # connect to geodataBase \r\n try : \r\n _dbObj = GeoDataBase()\r\n except: \r\n _logger.debug('Connection to database failed!')\r\n else:\r\n _gammaVal = _dbObj._retreive_databasecolumns(properties)\r\n if sproperty in properties: \r\n indexEprops = properties.index(sproperty )\r\n try:\r\n _gammaVal [indexEprops] = list(map(lambda x:_fs(x),\r\n _gammaVal[indexEprops]))\r\n except TypeError:\r\n _gammaVal= list(map(lambda x:_fs(x),\r\n _gammaVal))\r\n return _gammaVal", "def _prop(self):\n return [\"%s = %s\" % (str(k), repr(v)) for k, v in self.prop.items()]", "def get_properties():", "def getPropertiesAll():", "def get_property_list(self,filtr):\n\n\n return self.dp.get_property_list(filtr)", "def extract_list(self, property, data):\n\n values = self.get_property(property, data)\n if len(values) == 1:\n return [ self.concat_text(child) for child in values[0].getchildren() ]\n else:\n return [ self.concat_text(val) for val in values ]", "def getProperties():", "def get_properties_code(self, obj):\n return []", "def _compile_props(props_text, grouped=False):\n props, prefixes = [], \"-webkit-,-khtml-,-epub-,-moz-,-ms-,-o-,\".split(\",\")\n for propline in props_text.strip().lower().splitlines():\n props += [pre + pro for pro in propline.split(\" \") for pre in prefixes]\n props = filter(lambda line: not line.startswith('#'), props)\n if not grouped:\n props = list(filter(None, props))\n return props, [0]*len(props)\n final_props, groups, g_id = [], [], 0\n for prop in props:\n if prop.strip():\n final_props.append(prop)\n groups.append(g_id)\n else:\n g_id += 1\n return (final_props, groups)", "def property_list_to_str(properties: th.PropertiesList) -> List[str]:\n return [name for (name, prop) in properties.items()]", "def transform_property_info_list(se, prop_list, output_type):\n props = [{\"description\": _prop.get(\"description\"),\n \"domain\": transform_schemaclasses_lst(se,\n _prop.get(\"domain\"),\n output_type),\n \"range\": transform_schemaclasses_lst(se,\n _prop.get(\"range\"),\n output_type),\n \"curie\": se.cls_converter.get_curie(_prop.get(\"uri\")),\n \"label\": se.cls_converter.get_label(_prop.get(\"uri\")),\n \"uri\": _prop.get(\"uri\"),\n \"object\": se.get_property(_prop.get(\"uri\"))} for _prop in prop_list]\n return props", "def get_properties_code(self, obj):\n # called only from generate_code_ctor when creating a class constructor to get the first lines\n # otherwise properties are part of the code returned by get_code\n prop_lines = []\n self._reset_vars()\n\n self._prepare_tmpl_content(obj)\n for line in self.tmpl_props:\n prop_lines.append(line % self.tmpl_dict)\n return prop_lines", "def _disp_props(self):\n ret = list()\n if self.required:\n ret.append('required')\n if self.default:\n ret.append('default=%s' % self.default)\n return ret", "def process_property(prop):\n output = {}\n output['Property'] = prop['PropertyAddress']\n output['Sale date'] = convert_date(prop['DateSold'])\n output['Sale price'] = 
convert_prices(prop['SalePrice'])\n output['Rates value'] = convert_prices(prop['CapitalValue'])\n return output", "def getPropertieList(self, propname, propindex):\n if propname in self._getPropName():\n try:\n return self.properties[propname][propindex]\n except:\n raise Exception(\"indice {0} incorrecto\".format(propindex))\n else:\n raise Exception(\"la propiedad {0} no existe\".format(propname))", "def get_properties_for_a_collection_of_objects(vim, type,\r\n obj_list, properties):\r\n client_factory = vim.client.factory\r\n if len(obj_list) == 0:\r\n return []\r\n prop_spec = get_prop_spec(client_factory, type, properties)\r\n lst_obj_specs = []\r\n for obj in obj_list:\r\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\r\n prop_filter_spec = get_prop_filter_spec(client_factory,\r\n lst_obj_specs, [prop_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[prop_filter_spec])", "def getProperties(self, prop_colour):\n props = database_creator.db.query(\n \"SELECT name FROM main_property_deck WHERE property_colour = :prop_colour\", prop_colour=prop_colour)\n properties = []\n for i in props:\n properties.append(i[\"name\"])\n return properties", "def _get_properties(config: argparse.Namespace) -> tuple[set[str], set[str]]:\n property_classes = {BUILTIN_PROPERTY}\n property_names: set[str] = set() # Not returning 'property', it has its own check.\n if config is not None:\n property_classes.update(config.property_classes)\n property_names.update(\n prop.rsplit(\".\", 1)[-1] for prop in config.property_classes\n )\n return property_classes, property_names", "def get_result_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_simple() and p.is_result_property()]", "def getProperties(targets):", "def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])", "def _get_fitness(self, mol):\n\n property_vector = []\n for property_fn in self._property_fns:\n logger.info(\n f'Using {property_fn.__name__} on \"{mol}\".'\n )\n property_vector.append(property_fn(mol))\n return property_vector", "def statistify(criteria):\n final = []\n for degree in criteria.keys():\n if degree == 'total':\n continue\n for num in range(0,criteria[degree]):\n final.append(int(degree.split('degree')[1]))\n return final", "def __str__(self):\n phases = '|'.join([phase.name for phase in PropertyPhase if self & phase])\n return phases", "def _PropList(self):\n prop_list = []\n\n if self.HASH_PROPERTIES is None and self.HASH_EXCLUDE is None:\n return prop_list\n\n # TODO(ckl): comprehensive list of \"internal\" properties\n exclude_list = self.HASH_EXCLUDE or tuple()\n exclude_list += metadata_api.GetFieldNames(self, ui_readonly=True)\n # TODO(raulg): The deleted can be removed from the exclude_list after all\n # records have been purged of deleted fields.\n exclude_list += ('deleted', 'key_subtype', 'key_order', 'key_name')\n\n for prop in self._properties:\n if '__' in prop and not prop.endswith('key_name'):\n continue\n if self.HASH_PROPERTIES is not None and prop not in 
self.HASH_PROPERTIES:\n continue\n if self.HASH_EXCLUDE is not None and prop in exclude_list:\n continue\n prop_list.append(prop)\n\n prop_list.sort()\n return prop_list", "def getProperties(groupId, contractId):\n\tprint \"Getting properties for group %s and contract %s\" % (groupId, contractId)\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties', property_parameters)\n\t\n\tif \"properties\" in property_result:\n\t\tproperty_items = property_result['properties']['items']\n\telse:\n\t\tproperty_items = []\n\n\treturn (property_items)", "def get_property(self, property, data):\n\n values = data.xpath(\"%s//*[@%s='%s']\" % (self.scope, self.attribute, property))\n if len(values) == 0:\n values = data.xpath(\"//*[@%s='%s']\" % (self.attribute, property))\n return values", "def _format_calc(self, outputs: Dict[str, np.array], system: System):\n results = {p: [] for p in self.required_properties}\n\n for output in outputs:\n for p in self.required_properties:\n # Check for convergence\n if output[p] is None:\n raise QMCalculatorError(\"Errors encountered during computation.\")\n\n results[p].append(torch.from_numpy(output[p]))\n\n for p in self.required_properties:\n results[p] = torch.stack(results[p]).to(system.device, system.dtype)\n\n return results", "def getComputedAttributes(self) -> list:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getComputedAttributes\")\n path = \"/config/computedAttributes\"\n res = self.connector.getData(self.endpoint + path)\n data = res[\"children\"]\n nextPage = res[\"_page\"].get(\"next\", \"\")\n # while nextPage != \"\":\n # res = self.connector.getData(self.endpoint+path,\n # params=params, headers=self.header)\n # data += res['children']\n # nextPage = res['_page'].get('next','')\n return res", "def get_inpProp(prop,iterable):\n for line in cleanStrings(iterable,CC='!'):\n llist = line.split()\n try:\n propIndex = index(prop,llist) + 1\n except ValueError:\n pass\n else:\n try:\n return float(llist[propIndex])\n except:\n raise ValueError", "def values(names, location, field = 0):\n table = read_properties(location)\n result = []\n for name in names:\n result.append(table[name][field])\n return result", "def get_properties(self):\n return self.properties", "def palues(self):\n return self[self.peys()]", "def palues(self):\n return self[self.peys()]", "def compute_smile_prop(smile):\n\n def compute_for_one(smi):\n\n \"\"\"\n Computes properties for a single smile sequence\n\n Inputs \n smi (str) : A sequence of smile characters\n Outputs\n prop (list): Computed properties, \"Not exist\" if properties cannot be computed\n \"\"\"\n\n try:\n mol=Chem.MolFromSmiles(smi) \n prop = [Descriptors.ExactMolWt(mol), Descriptors.MolLogP(mol), QED.qed(mol)]\n except:\n prop = 'Not exist!'\n return prop\n\n \n if isinstance(smile, (list, tuple)):\n all_list = []\n for s in list(smile):\n all_list.append(compute_for_one(s))\n props = all_list\n\n elif isinstance(smile, str):\n props = compute_for_one(smile) \n else:\n print(f\"Input must be a string or list, Instead got {type(smile)}\")\n \n return props", "def get_property_names(self, *, is_allprop):\n # Let default implementation return supported live and dead properties\n propNames = super().get_property_names(is_allprop=is_allprop)\n # Add fieldnames as properties\n tableName, primKey = self.provider._split_path(self.path)\n if primKey is not None:\n conn = self.provider._init_connection()\n fieldlist = 
self.provider._get_field_list(conn, tableName)\n for fieldname in fieldlist:\n propNames.append(\"{%s:}%s\" % (tableName, fieldname))\n conn.close()\n return propNames", "def _diagnostic_meta_properties_renderer(\n cls, result: Optional[ExpectationValidationResult] = None, **kwargs: dict\n ) -> Union[list, List[str], List[list]]:\n\n if not result:\n return []\n custom_property_values = []\n meta_properties_to_render: Optional[dict] = None\n if result and result.expectation_config:\n meta_properties_to_render = result.expectation_config.kwargs.get(\n \"meta_properties_to_render\"\n )\n if meta_properties_to_render:\n for key in sorted(meta_properties_to_render.keys()):\n meta_property = meta_properties_to_render[key]\n if meta_property:\n try:\n # Allow complex structure with . usage\n assert isinstance(\n result.expectation_config, ExpectationConfiguration\n )\n obj = result.expectation_config.meta[\"attributes\"]\n keys = meta_property.split(\".\")\n for i in range(0, len(keys)):\n # Allow for keys with a . in the string like {\"item.key\": \"1\"}\n remaining_key = \"\".join(keys[i:])\n if remaining_key in obj:\n obj = obj[remaining_key]\n break\n else:\n obj = obj[keys[i]]\n\n custom_property_values.append([obj])\n except KeyError:\n custom_property_values.append([\"N/A\"])\n return custom_property_values", "def get_comp_vals(self, propname):\n if not isinstance(propname, str):\n return propname\n if propname.endswith('*'):\n try:\n return self[propname]\n except KeyError:\n pass\n try:\n vals = {}\n for comp in self.components.values():\n vals[comp.name] = comp[propname]\n return vals\n except KeyError:\n msg = f'{propname} not found on at least one component'\n raise Exception(msg)", "def aggregate_input_properties(component_list):\n return combine_component_properties(component_list, 'input_properties')", "def get_item_properties(item, fields, mixed_case_fields=[], formatters={}):\r\n row = []\r\n\r\n for field in fields:\r\n if field in formatters:\r\n row.append(formatters[field](item))\r\n else:\r\n if field in mixed_case_fields:\r\n field_name = field.replace(' ', '_')\r\n else:\r\n field_name = field.lower().replace(' ', '_')\r\n if not hasattr(item, field_name) and isinstance(item, dict):\r\n data = item[field_name]\r\n else:\r\n data = getattr(item, field_name, '')\r\n if data is None:\r\n data = ''\r\n row.append(data)\r\n return tuple(row)", "def mdAveragePropertiesList(self):\n\t\tpass", "def parse_propspec(propspec):\n\n props = []\n\n for objspec in propspec:\n if ':' not in objspec:\n raise Exception('property specification \\'%s\\' does not contain '\n 'property list' % objspec)\n\n objtype, objprops = objspec.split(':', 1)\n\n motype = getattr(vim, objtype, None)\n\n if motype is None:\n raise Exception('referenced type \\'%s\\' in property specification '\n 'does not exist,\\nconsult the managed object type '\n 'reference in the vSphere API documentation' %\n objtype)\n\n proplist = objprops.split(',')\n\n props.append((motype, proplist,))\n\n return props", "def retrieve_properties_piped(item_list):\n # Creates the query by seperating each item with \"|\"\n item_list_query = \"\"\n for item in range(len(item_list)):\n if item == (len(item_list) - 1):\n item_list_query += item_list[item]\n else:\n item_list_query += item_list[item] + \"%7C\"\n\n # The string with API wbgetentities to find multiple items in an optimal format\n URL = \"https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&ids=%s&props=claims&languages=en&formatversion=2\" % (\n 
item_list_query)\n\n # Opens a HTMl session and gets the DATA from the API\n with requests.Session() as S:\n DATA = dict(S.post(url=URL, headers={\"user-agent\": \"magic browser\", \"Content-Type\": \"application/json\"}).json())\n\n # Appends the properties of each item to a nested list\n nested_list = []\n for entity in DATA[\"entities\"]:\n try:\n nested_list.append(list(DATA[\"entities\"][entity][\"claims\"].keys()))\n except:\n pass\n\n return nested_list", "def get_proplist(self, naam):\r\n h = self._root.find(naam)\r\n if h is None:\r\n h = []\r\n else:\r\n hh = h.findall(\"regel\")\r\n h = []\r\n for x in hh:\r\n if x.text is None:\r\n h.append(\"\")\r\n else:\r\n h.append(x.text.rstrip())\r\n return h", "def get_cached_property_names(self): # real signature unknown; restored from __doc__\n return []", "def properties(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___Expression]:", "def __get_system_property_args_string(self, incremental_result):\n _method_name = '__get_system_property_args_string'\n self._logger.entering(incremental_result, class_name=self._class_name, method_name=_method_name)\n result = incremental_result\n for key, value in self.__sys_props.iteritems():\n if len(result) > 0:\n result += ' '\n result += '-D' + key\n if value is not None:\n result += '=' + value\n self._logger.exiting(class_name=self._class_name, method_name=_method_name, result=result)\n return result", "def get_computed_property_names(cls):\n computed_properties = {}\n\n for (property_name, instance) in cls._config_registry.iteritems():\n if property_name.startswith(COMPUTED_PROPERTY_PREFIX):\n computed_properties[property_name] = {\n 'description': instance.description\n }\n\n return computed_properties", "def read_properties_sp(lines):\n\n # TODO Better logging for crashed xtb\n if not read_status(lines):\n return None\n\n keywords = [\n \"final structure:\",\n \":: SUMMARY ::\",\n \"Property Printout \",\n \"ITERATIONS\",\n ]\n\n stoppattern = \"CYCLE \"\n idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)\n idxs[0]\n idx_summary = idxs[1]\n idx_end_summary = idxs[2]\n idxs[3]\n\n if idx_summary is None:\n # TODO Better fix\n assert False, \"uncaught xtb exception\"\n\n # Get atom count\n keyword = \"number of atoms\"\n idx = linesio.get_index(lines, keyword)\n line = lines[idx]\n n_atoms = line.split()[-1]\n n_atoms = int(n_atoms)\n\n # Get energies\n idx_summary = idxs[1] + 1\n\n # :: total energy +1\n # :: total w/o Gsasa/hb +2\n # :: gradient norm +3\n # :: HOMO-LUMO gap +4\n # ::.....................+4\n # :: SCC energy +5\n # :: -> isotropic ES +6\n # :: -> anisotropic ES +7\n # :: -> anisotropic XC +8\n # :: -> dispersion +9\n # :: -> Gsolv +10\n # :: -> Gborn +11\n # :: -> Gsasa +12\n # :: -> Ghb +13\n # :: -> Gshift +14\n # :: repulsion energy +15\n # :: add. 
restraining +16\n\n prop_lines = lines[idx_summary : idx_end_summary - 2]\n prop_dict = parse_sum_table(prop_lines)\n\n # total_energy = prop_dict.get(\"total_energy\", float(\"nan\"))\n # gsolv = prop_dict.get(\"gsolv\", float(\"nan\"))\n # electronic_energy = prop_dict.get(\"scc_energy\", float(\"nan\"))\n\n properties = prop_dict\n\n # Get dipole\n dipole_str = \"molecular dipole:\"\n idx = linesio.get_rev_index(lines, dipole_str)\n if idx is None:\n dipole_tot = None\n else:\n idx += 3\n line = lines[idx]\n line = line.split()\n dipole_tot = line[-1]\n dipole_tot = float(dipole_tot)\n\n properties = {\n COLUMN_DIPOLE: dipole_tot,\n **properties,\n }\n\n # Get covalent properties\n properties_covalent = read_covalent_coordination(lines)\n\n # Get orbitals\n properties_orbitals = read_properties_orbitals(lines)\n properties = {**properties, **properties_orbitals, **properties_covalent}\n\n return properties", "def get_plotable_result_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_plotable() and p.is_result_property()]", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_property_from_relations(\n self, relations: List[Relation], prop: str\n ) -> Set[str]:\n props = {rel.data[prop] for rel in relations if prop in rel.data}\n return props", "def get_compound_properties(path):\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]", "def test_list_properties(self):\n pass", "def getElementProperties():", "def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...", "def getPropertyApprox(self, regexp):\n import re\n\n matches = {}\n pattern = re.compile( \".*\"+regexp+\".*\", re.IGNORECASE )\n\n for k in self.__properties:\n if isinstance( k, str) and pattern.match( k ):\n matches[k] = self.__properties[k]\n\n return matches", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Seed', type(self).seed),\n self._prop_builder.auto('Class filter', type(self).class_filter),\n self._prop_builder.auto('Random order', type(self).random_order),\n self._prop_builder.auto('Save gpu memory', type(self).save_gpu_memory),\n self._prop_builder.auto('Location filter ration', type(self).location_filter_ratio),\n self._prop_builder.auto('Dataset size', type(self).dataset_size),\n self._prop_builder.auto('Dataset config', type(self).dataset_config),\n self._prop_builder.auto('Switch training resets train pos ', type(self).switch_train_resets_train_pos),\n self._prop_builder.auto('Hide labels', type(self).is_hide_labels)\n ]", "def get_property_all_planets(self, property_name):\n return np.array([p.__dict__[property_name] for p in self.planets])", "def _clist(slist):\n retList = []\n if slist == None:\n return retList\n for p in slist:\n aobj = {}\n for prop in p.allProperties():\n if prop in IGNORED_PROPS:\n continue\n 
tmpval = p.valueForProperty_(prop)\n if type(tmpval) == ABMultiValueCoreDataWrapper:\n aval = [(_getVal(tmpval.labelAtIndex_(i)),\n _getVal(tmpval.valueAtIndex_(i)))\n for i in range(0, tmpval.count())]\n else:\n aval = _getVal(tmpval)\n if aval is not None:\n aobj[prop.lower()] = aval\n retList.append(aobj)\n return retList", "def computeProp(self):\n self.chem = {}\n for key in self.config.C:\n if key in ['P', 'T', 'Z', 'DZ']:\n continue\n self.chem[key] = chemistry.ConstituentProperties(key)\n\n # nAtm = len(self.gas[self.config.C['P']])\n self.property = []\n for op in self.config.LP:\n self.property.append([])\n zOffset = 0.0\n iOffset = 0\n psep = 1.0E6\n for i, zv in enumerate(self.gas[self.config.C['Z']]): # find the nearest z value at p_ref\n P = self.gas[self.config.C['P']][i]\n if abs(P - self.config.p_ref) < psep:\n psep = abs(P - self.config.p_ref)\n iOffset = i\n zOffset = self.gas[self.config.C['Z']][iOffset]\n z_at_p_ref = self.config.Req\n\n for i, zv in enumerate(self.gas[self.config.C['Z']]):\n T = self.gas[self.config.C['T']][i]\n P = self.gas[self.config.C['P']][i]\n self.property[self.config.LP['P']].append(P)\n self.property[self.config.LP['Z']].append(zv)\n rr = z_at_p_ref + zv - zOffset\n # note that this is the \"actual\"z along equator referenced to planet center (aka radius)\n self.property[self.config.LP['R']].append(rr)\n # ##set mean amu\n amulyr = 0.0\n for key in self.chem:\n amulyr += self.chem[key].amu * self.gas[self.config.C[key]][i]\n self.property[self.config.LP['AMU']].append(amulyr)\n # ##set GM pre-calc (normalized further down) and get lapse rate\n if not i:\n self.property[self.config.LP['GM']].append(0.0)\n self.property[self.config.LP['LAPSE']].append(0.0)\n self.property[self.config.LP['LAPSEP']].append(0.0)\n else:\n rho = (amulyr * P) / (chemistry.R * T)\n dr = abs(zv - self.gas[self.config.C['Z']][i - 1])\n dV = 4.0 * np.pi * (rr**2) * dr\n dM = 1.0e11 * rho * dV\n GdM = self.property[self.config.LP['GM']][i - 1] + chemistry.GravConst * dM\n # in km3/s2\n # mass added as you make way into atmosphere by radius r (times G)\n self.property[self.config.LP['GM']].append(GdM)\n dT = abs(T - self.gas[self.config.C['T']][i - 1])\n dP = abs(P - self.gas[self.config.C['P']][i - 1])\n self.property[self.config.LP['LAPSE']].append(dT / dr)\n self.property[self.config.LP['LAPSEP']].append(dT / dP)\n # ##set refractivity and index of refraction\n refrlyr = 0.0\n for key in self.chem:\n refrlyr += self.chem[key].refractivity(T=T) * self.gas[self.config.C[key]][i]\n refrlyr = refrlyr * P * (293.0 / T)\n self.property[self.config.LP['REFR']].append(refrlyr)\n nlyr = refrlyr / 1.0E6 + 1.0\n self.property[self.config.LP['N']].append(nlyr)\n\n # ##Now need to normalize GM to planet and calculate scale height (H)\n GMnorm = self.property[self.config.LP['GM']][iOffset] # G*(Mass added by p_ref)\n for i, mv in enumerate(self.property[self.config.LP['GM']]):\n gm = self.config.GM_ref - (mv - GMnorm)\n self.property[self.config.LP['GM']][i] = gm\n little_g = gm / self.property[self.config.LP['R']][i]**2\n m_bar = self.property[self.config.LP['AMU']][i]\n T = self.gas[self.config.C['T']][i]\n self.property[self.config.LP['H']].append((chemistry.R * T) /\n (little_g * m_bar) / 1000.0)\n self.property[self.config.LP['g']].append(little_g)\n self.property = np.array(self.property)", "def property_list_of_specific_unit(data_list, unit, counter= None, show = False):\n if not isinstance(counter, Counter):\n counter = Counter()\n \n total_list = 
check_from_specific_unit(data_list, unit, show)\n #print ('t', total_list)\n #prop_list = [data['Property'] for data in total_list]\n prop_list = map(lambda data: data['Property'], total_list)\n #print ('p', prop_list)\n counter.update(prop_list)\n return counter", "def getProperty(propname):", "def GetProperties(self, P, H, fracs, propList):\n if not self.parent:\n return None\n thCaseObj = self.parent.GetThermo()\n if not thCaseObj:\n return None\n \n thAdmin, prov, case = self.parent.GetThermoAdmin(), thCaseObj.provider, thCaseObj.case\n inProp1 = [P_VAR, P]\n inProp2 = [H_VAR, H]\n vals = thAdmin.GetProperties(prov, case, inProp1, inProp2, OVERALL_PHASE, fracs, propList) \n return vals", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def get_metric_list(self) -> List[str]:\n ...", "def load_criterias():\r\n l = [ (p.id, p.name) for p in StockProperty.objects.all() ]\r\n l.insert(0, ('', 'Select to add criteria ...'))\r\n return l", "def get_measures(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[2])\n return result", "def get_properties(self):\n return self.properties", "def get(self) -> list[float]:", "def getPropsListAsOSCMsgs(self):\r\n formatedMsg = []\r\n\r\n if BLENDER_MODE == 'BGE':\r\n polygonDict = self._getFacesAndMaterials()\r\n elif BLENDER_MODE == 'BPY':\r\n polygonDict = self._getFacesAndMaterials_bpy()\r\n\r\n for key in polygonDict.keys():\r\n faceID = str(key)\r\n matID = polygonDict[key]['material']\r\n p0123 = polygonDict[key]['vertices']\r\n msg = self._shapeFaceMsg(faceID,matID,p0123)\r\n formatedMsg.append(msg)\r\n return formatedMsg", "def property_autoparse(self, candidate_pattern, patterns):\n properties = None\n candidates = self.find_objects(regex=candidate_pattern)\n if len(candidates):\n properties = []\n else:\n return properties\n for candidate in candidates:\n properties.append(self.match_to_dict(line=candidate, patterns=patterns))\n return properties", "def 
propertyDetails(self):\n return (PROPERTY_DETAILS.get(aa, NONE) for aa in self.sequence)", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def listAllMemberProperties(self, exclude_props=[], include_props=None):\n if not self.is_compatible(): return []\n all_props = set([])\n for member in self.getAllMembers():\n user = member.getUser()\n for sheet in user.getOrderedPropertySheets():\n all_props = all_props | set(sheet.propertyIds())\n\n # property sheet hasn't id property, next we add it manually\n all_props = (all_props | set(['id'])) - set(exclude_props)\n if include_props: all_props = all_props & set(include_props)\n return list(all_props)", "def search_resources_for_property_value_violations(cfn, resource_type, prop_name, expected_prop_value) -> []:\n results = []\n for resource_name, resource_values in cfn.get_resources([resource_type]).items():\n path = ['Resources', resource_name, 'Properties']\n properties = resource_values.get('Properties')\n\n if prop_name not in properties.keys():\n results.append(path)\n return results\n\n if properties[prop_name] is not expected_prop_value:\n results.append(path + [prop_name])\n return results", "def iterProperties(cls):\n meta = cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()", "def list_property(property, all_articles, sort, sort_by):\n contents = {}\n for title in all_articles:\n try:\n if sort_by:\n key = all_articles[title][sort_by] \n value = all_articles[title][property]\n contents[key] = value\n else:\n key = all_articles[title][property]\n value = None\n contents[key] = value\n except KeyError:\n if sort_by:\n print(f\"'{property}' or '{sort_by}' isn't a valid item to list.\")\n else:\n print(f\"'{property}' isn't a valid item to list.\")\n valid_properties = \", \".join(all_articles[title].keys())\n print(f\"Choices are: {valid_properties}\")\n return\n \n all_keys = contents.keys()\n\n # Sort in-place if applicable.\n if sort or sort_by:\n all_keys = sorted(all_keys)\n \n # Print the output.\n for item_key in all_keys:\n if sort_by:\n print(f\"{item_key}: {contents[item_key]}\")\n else:\n print(item_key)", "def get_attributes(units, properties=[\"p_set\", \"q_set\"]):\n df = pd.DataFrame()\n for unit in units.items():\n for prop in properties:\n df.at[unit[0], prop] = getattr(unit[1], prop)\n return df", "def properties_get(self):\n return self._get('properties')", "def include_prop(self) -> List[str]:\n return self._include_prop", "def retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):\n objcont = []\n client_factory = vim.client.factory\n opts = client_factory.create('ns0:RetrieveOptions')\n opts.maxObjects = max_count\n res = vim.RetrievePropertiesEx(prop_coll,\n specSet=spec_set,\n options=opts)\n while True:\n if res and res.objects:\n objcont.extend(res.objects)\n if hasattr(res, \"token\") and res.token:\n res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)\n else:\n break\n return objcont", "def _get_params(self, fluents):\n objects_all = set()\n for fluent in fluents:\n objects = fluent.replace(\"(\",\"\").replace(\")\",\"\").split(\" \")[1:]\n objects_all.update(objects)\n\n return objects_all", "def getCriteriaItems( self ):\n # filter out empty strings\n result = []\n\n value = tuple( filter( None, self.value ) )\n if not value:\n return ()\n result.append( ( self.field, self.value ), )\n\n if self.operator is not None:\n result.append( ( '%s_operator' % self.field, self.operator ) )\n\n return 
tuple( result )", "def _getPropName(self):\n return self.properties.keys()", "def getProperties(self):\n return self.properties", "def render(prop):\n result = ['@%s' % prop.name]\n if prop.modifier != '':\n result.append(prop.modifier)\n if prop.ident != '':\n result.append(prop.ident)\n if prop.description != '':\n result.append(prop.description)\n return ' '.join(result)", "def getPropertyNamesAsStrings(self):\n return self._propertyStringNames", "def list_property(\n self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:\n return self._env.list_property(key)", "def read_properties_fukui(lines):\n\n keywords = [\"Fukui index Calculation\", \"f(+)\", \"Property Printout\"]\n\n indices = linesio.get_rev_indices_patterns(lines, keywords)\n\n if indices[0] is None:\n return None\n\n start_index = indices[1]\n end_index = indices[2]\n\n f_plus_list = []\n f_minus_list = []\n f_zero_list = []\n\n for i in range(start_index + 1, end_index - 1):\n line = lines[i]\n line = line.split()\n\n f_plus = float(line[1])\n f_minus = float(line[2])\n f_zero = float(line[3])\n\n f_plus_list.append(f_plus)\n f_minus_list.append(f_minus)\n f_zero_list.append(f_zero)\n\n f_plus_list = np.array(f_plus_list)\n f_minus_list = np.array(f_minus_list)\n f_zero_list = np.array(f_zero_list)\n\n properties = {\n \"f_plus\": f_plus_list,\n \"f_minus\": f_minus_list,\n \"f_zero\": f_zero_list,\n }\n\n return properties", "def getPropertyNames(self):\n return self._property_names", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def properties(self):", "def properties(self):", "def properties(self):", "def compute_string_properties(string):\n distinct = len(set(string))\n length = len(string)\n rev_list = [i for i in reversed(sorted(string))]\n return (length, rev_list, distinct)", "def extract_prop_from_var_lst(self, variable_lst, property):\n # | - extract_prop_from_var_lst\n # result = {}\n for i in variable_lst:\n if i[\"property\"] == property:\n return i[\"value\"]\n # __|", "def get_sale_prices():\n\n r = requests.post(settings.qv_url, data=REQUEST_DATA)\n response = r.json()\n\n data_processed = [process_property(prop) for prop in response['LocalAreaSales']]\n\n return data_processed", "def getPropertyOwners(self) -> List[unicode]:\n ..." ]
[ "0.66682273", "0.6444924", "0.6429075", "0.6418234", "0.62668264", "0.61048454", "0.6090979", "0.60708404", "0.60441667", "0.600167", "0.59521866", "0.5907654", "0.58133453", "0.5764389", "0.5727516", "0.57220525", "0.5717066", "0.56708825", "0.5670351", "0.56337535", "0.5605939", "0.55597013", "0.5543154", "0.553927", "0.55359674", "0.5519402", "0.5499797", "0.5484197", "0.5455684", "0.54532814", "0.54211974", "0.5416715", "0.5414089", "0.5414089", "0.53968805", "0.5376872", "0.53200984", "0.52971536", "0.52921", "0.52871424", "0.5272124", "0.5266893", "0.5261858", "0.5253608", "0.52485067", "0.5242579", "0.5233869", "0.52272373", "0.5223533", "0.52163434", "0.5209887", "0.5209445", "0.5205429", "0.52041745", "0.5199949", "0.51998967", "0.5196068", "0.51953256", "0.5182291", "0.51760197", "0.516864", "0.5163367", "0.51601064", "0.5158872", "0.5157191", "0.51553196", "0.5152939", "0.51438653", "0.51385635", "0.5135385", "0.51347625", "0.51287", "0.51272404", "0.5124703", "0.51237106", "0.51147676", "0.5113004", "0.5108856", "0.51025236", "0.5101537", "0.5097569", "0.5095639", "0.50848216", "0.5084386", "0.50791484", "0.50747216", "0.5063075", "0.50594246", "0.5055496", "0.50531983", "0.5043162", "0.5030749", "0.5017742", "0.50170547", "0.50170547", "0.50170547", "0.50125635", "0.5011424", "0.50100154", "0.50098" ]
0.5443529
30
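
The row above maps property names to OpenCV helper calls. A minimal usage sketch under stated assumptions follows (assumes OpenCV 4.x's two-value `findContours` return; the direct `cv2` calls stand in for the private helpers, whose bodies are not shown in the dataset):

import cv2
import numpy as np

# Hypothetical binary test image: a filled white square on black.
img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (80, 80), 255, -1)

# OpenCV 4.x returns (contours, hierarchy); 3.x prepends the image.
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]

# Direct equivalents of a few properties the dispatcher above exposes.
props = {
    "Area": cv2.contourArea(cnt),
    "Perimeter": cv2.arcLength(cnt, True),
    "BoundingBox": cv2.boundingRect(cnt),
    "IsConvex": cv2.isContourConvex(cnt),
    "Approximation": cv2.approxPolyDP(cnt, 0.1 * cv2.arcLength(cnt, True), True),
}
print(props)
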
Calculate the approximation of a contour shape to another shape with fewer vertices, depending upon the precision we specify.
def __CalculateApproximation(self, contour):
    epsilon = 0.1 * cv2.arcLength(contour, True)
    return cv2.approxPolyDP(contour, epsilon, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contourApprox(cnt, epsilon = 0.005):\n\tepsilon = epsilon*cv2.arcLength(cnt, True)\n\tapprox = cv2.approxPolyDP(cnt, epsilon, True)\n\treturn approx", "def approx_poly(self, mask):\n\n mask_expand = mask.copy()\n contours, _ = cv2.findContours(mask_expand, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n approx_curve = []\n if self.max_area_only:\n contour_areas = [cv2.contourArea(contour) for contour in contours]\n if len(contour_areas) == 0:\n return []\n max_index = np.argmax(np.array(contour_areas))\n max_contour = contours[max_index]\n if self.use_rotated_box:\n # In minimum rotated rectangle\n min_rect = cv2.minAreaRect(max_contour)\n poly = cv2.boxPoints(min_rect)\n poly = np.int0(poly)\n else:\n # In polygon contours\n perimeter = cv2.arcLength(max_contour, True) * 0.01\n poly = cv2.approxPolyDP(max_contour, perimeter, True)\n approx_curve.append(poly)\n else:\n for contour in contours:\n perimeter = cv2.arcLength(contour, True) * 0.01\n poly = cv2.approxPolyDP(contour, perimeter, True)\n approx_curve.append(poly)\n return approx_curve", "def fun_contours(self, params):\n shape_coeffs = params[:self.num_shape_params]\n blendshape_end = self.num_shape_params + self.numObservations * self.num_blendshape_params\n blendshape_coeffs = params[self.num_shape_params:blendshape_end].reshape((self.numObservations, self.num_blendshape_params))\n trans_mats = params[blendshape_end:].reshape((self.numObservations, 7))\n\n vertices3d = self.vertices3d\n vertices3d_from_mesh = np.zeros_like(vertices3d)\n vertices3d_inner, vertices3d_right, vertices3d_left = self.transform_meshes(shape_coeffs, blendshape_coeffs, trans_mats)\n\n inner_idx = 0\n for idx in range(vertices3d.shape[0]):\n lm_idx = idx % 66\n obs_num = int(np.floor(idx/66))\n\n if lm_idx in self.contour_lms_list[0]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_right[obs_num])\n elif lm_idx in self.contour_lms_list[1]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_left[obs_num])\n else:\n vertices3d_from_mesh[idx] = vertices3d_inner[obs_num][inner_idx]\n inner_idx += 1\n if inner_idx == 50:\n inner_idx = 0\n\n return (vertices3d_from_mesh - vertices3d).ravel()", "def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]", "def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n accu_0 += x * x\n accu_1 += x * y\n accu_2 += x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = 
accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_1 + cov_mat_2\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)", "def fracture(self, max_points=_max_points, precision=1e-3):\n if max_points > 4:\n ii = 0\n while ii < len(self.polygons):\n if len(self.polygons[ii]) > max_points:\n pts0 = sorted(self.polygons[ii][:, 0])\n pts1 = sorted(self.polygons[ii][:, 1])\n ncuts = len(pts0) // max_points\n if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:\n # Vertical cuts\n cuts = [\n pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 0,\n 1 / precision)\n else:\n # Horizontal cuts\n cuts = [\n pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 1,\n 1 / precision)\n self.polygons.pop(ii)\n layer = self.layers.pop(ii)\n datatype = self.datatypes.pop(ii)\n self.polygons.extend(\n numpy.array(x)\n for x in itertools.chain.from_iterable(chopped))\n npols = sum(len(c) for c in chopped)\n self.layers.extend(layer for _ in range(npols))\n self.datatypes.extend(datatype for _ in range(npols))\n else:\n ii += 1\n return self", "def _bounding_precision(self) :\n if not self.precision().is_infinite() :\n return self.precision()\n \n return self.parent().monoid().minimal_composition_filter( self.coefficients().keys(),\n [self.parent().monoid().zero_element()] )", "def getContours(image, copyImage):\n contours, heirarchy = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n for contour in contours:\n area = cv.contourArea(contour)\n \n if area > 500.0:\n cv.drawContours(copyImage, contour, -1, (255,0,0),3)\n perimeter = cv.arcLength(contour, True)\n \n # Approximates to the nearest polygon\n approx = cv.approxPolyDP(contour,0.02*perimeter, True)\n objectCoordinates = len(approx)\n\n # Returns the x, y and height width of the polygon\n x, y, w, h = cv.boundingRect(approx)\n\n if objectCoordinates == 3:\n objectShape = \"Triangle\"\n elif objectCoordinates == 4:\n ratio = w / float(h)\n if ratio >= 0.95 and ratio <= 1.05:\n objectShape = \"Square\"\n else: objectShape = \"Rectangle\"\n else: objectShape = \"Circle\" \n\n \n\n # Draw rectangles around the images\n cv.rectangle(copyImage, (x,y), (x+w, y+h), (0,255,0), 2)\n cv.putText(copyImage, objectShape, (x + (w//2), y + (h//2)),cv.FONT_HERSHEY_COMPLEX, 0.5, (0,0,0))", "def projection(poly1, dim, solver=None, abs_tol=ABS_TOL, verbose=0):\n if isinstance(poly1, Region):\n ret = Polytope()\n for i in range(len(poly1.list_poly)):\n p = projection(\n poly1.list_poly[i], dim,\n solver=solver, abs_tol=abs_tol)\n ret = ret + p\n return ret\n # flat ?\n if (poly1.dim < len(dim)) or 
is_empty(poly1):\n return poly1\n # `poly1` isn't flat\n poly_dim = poly1.dim\n dim = np.array(dim)\n org_dim = range(poly_dim)\n new_dim = dim.flatten() - 1\n del_dim = np.setdiff1d(org_dim, new_dim) # Index of dimensions to remove\n # logging\n logger.debug('polytope dim = ' + str(poly_dim))\n logger.debug('project on dims = ' + str(new_dim))\n logger.debug('original dims = ' + str(org_dim))\n logger.debug('dims to delete = ' + str(del_dim))\n mA, nA = poly1.A.shape\n # fewer rows than dimensions ?\n if mA < poly_dim:\n msg = 'fewer rows in A: ' + str(mA)\n msg += ', than polytope dimension: ' + str(poly_dim)\n logger.warning(msg)\n # enlarge A, b with zeros\n A = poly1.A.copy()\n poly1.A = np.zeros((poly_dim, poly_dim))\n poly1.A[0:mA, 0:nA] = A\n # stack\n poly1.b = np.hstack([poly1.b, np.zeros(poly_dim - mA)])\n logger.debug('m, n = ' + str((mA, nA)))\n # Compute cheby ball in lower dim to see if projection exists\n norm = np.sum(poly1.A * poly1.A, axis=1).flatten()\n norm[del_dim] = 0\n c = np.zeros(len(org_dim) + 1, dtype=float)\n c[len(org_dim)] = -1\n G = np.hstack([poly1.A, norm.reshape(norm.size, 1)])\n h = poly1.b\n sol = lpsolve(c, G, h)\n if sol['status'] != 0:\n # Projection not fulldim\n return Polytope()\n if sol['x'][-1] < abs_tol:\n return Polytope()\n # select projection solver\n if solver == \"esp\":\n return projection_esp(poly1, new_dim, del_dim)\n elif solver == \"exthull\":\n return projection_exthull(poly1, new_dim)\n elif solver == \"fm\":\n return projection_fm(poly1, new_dim, del_dim)\n elif solver == \"iterhull\":\n return projection_iterhull(poly1, new_dim)\n elif solver is not None:\n logger.warning('unrecognized projection solver \"' +\n str(solver) + '\".')\n # `solver` undefined or unknown\n # select method based on dimension criteria\n if len(del_dim) <= 2:\n logger.debug(\"projection: using Fourier-Motzkin.\")\n return projection_fm(poly1, new_dim, del_dim)\n elif len(org_dim) <= 4:\n logger.debug(\"projection: using exthull.\")\n return projection_exthull(poly1, new_dim)\n else:\n logger.debug(\"projection: using iterative hull.\")\n return projection_iterhull(poly1, new_dim)", "def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))", "def compute_coverage_for_contour_pair(\n contour1: np.ndarray,\n contour2: np.ndarray,\n max_size: int = DEFAULT_MAX_CONTOUR_MASK_SIZE,\n):\n im1, im2 = compute_contour_binary_masks(contour1, contour2, max_size=max_size)\n return (im1 & im2).sum() / im1.sum()", "def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width", "def setup_contour_input():\n from bfieldtools.utils import load_example_mesh\n\n mesh = load_example_mesh(\"unit_disc\")\n\n r = np.linalg.norm(mesh.vertices, axis=1)\n scalars = (1 - r) ** 2\n scalars *= mesh.vertices[:, 0]\n\n return mesh, scalars", "def __CalculateExtend(self, contour):\r\n area = self.__CalculateArea(contour)\r\n boundingBox = self.__CalculateBoundingBox(contour)\r\n return area / (boundingBox[2] * boundingBox[3])", "def func_curvature(self):\n return u.Curvature.CONVEX", "def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n 
clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = [(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y", "def curvature(contour,fn = 3, bn = 3):\n\n clen = contour.shape[0]\n E = np.zeros((clen,), np.float32)\n thetai = np.zeros((clen,), np.float32)\n\n for k in range(1,clen):\n \n # first and last few points\n if k < bn:\n bnd = 0\n fnd = k + fn\n elif k + fn > clen-1:\n bnd = k - bn\n fnd = clen-1\n else:\n bnd = k - bn\n fnd = k + fn\n\n # calculate curvature\n lb = math.sqrt( (contour[k,0]-contour[bnd,0])**2 + (contour[k,1]-contour[bnd,1])**2 )\n lf = math.sqrt( (contour[k,0]-contour[fnd,0])**2 + (contour[k,1]-contour[fnd,1])**2 )\n\n if contour[k,1]-contour[bnd,1]!=0:\n thetab=math.atan( np.double(abs(contour[k,0]-contour[bnd,0])) / np.double(abs(contour[k,1]-contour[bnd,1])) )\n else:\n thetab=math.atan( np.double(abs(contour[k,0]-contour[bnd,0])) / np.double(abs(contour[k,1]-contour[bnd,1])) )\n thetab = math.pi/2 - thetab\n\n if contour[k,1]-contour[fnd,1]!=0:\n thetaf=math.atan( np.double(abs(contour[k,0]-contour[fnd,0])) / np.double(abs(contour[k,1]-contour[fnd,1])) )\n else:\n thetaf=math.atan( np.double(abs(contour[k,0]-contour[fnd,0])) / np.double(abs(contour[k,1]-contour[fnd,1])) )\n thetaf = math.pi/2 - thetaf\n\n thetai[k]=(thetab+thetaf)/2\n detlaf=abs(thetaf-thetai[k])\n detlab=abs(thetai[k]-thetab)\n E[k]=detlaf/lf/2+detlab/lb/2\n\n E[0]=E[1]\n E[clen - 1]=E[clen - 2]\n thetai[0]=thetai[1]\n thetai[clen - 1]=thetai[clen - 2]\n\n return (E,thetai)", "def _mn_contour_ ( self , npoint , par1 , par2 , nsigma = 1 ) :\n if npoint < 4 : raise ValueError ( 'contour: npoint (%s) must be >= 4' % npoint )\n if not par1 in self : raise ValueError ( 'contour: par1(%s) is not in Minuit' % par1 )\n if not par2 in self : raise ValueError ( 'contour: par2(%s) is not in Minuit' % par2 )\n if par1 == par2 : raise ValueError ( 'contour: par1 == par2(%s) ' % par2 )\n #\n ## save old error defintion\n #\n old_err_def = 
self.GetErrorDef()\n #\n ## set new error definition\n #\n self.SetErrorDef ( nsigma * nsigma )\n \n graph = self.Contour ( npoint , par1 , par2 )\n\n #\n ## restore old error defininion\n #\n status = self.GetStatus()\n self.SetErrorDef ( old_err_def ) \n #\n if graph and 0 == status : return graph\n logger.error ( 'TMinuit::Contour: status %i' % status ) \n return graph", "def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)", "def find_optimal_components_subset(contours, edges):\n c_info = props_for_contours(contours, edges)\n c_info.sort(key=lambda x: -x['sum'])\n total = np.sum(edges) / 255\n area = edges.shape[0] * edges.shape[1]\n\n c = c_info[0]\n del c_info[0]\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n crop = this_crop\n covered_sum = c['sum']\n\n while covered_sum < total:\n changed = False\n recall = 1.0 * covered_sum / total\n prec = 1 - 1.0 * crop_area(crop) / area\n f1 = 2 * (prec * recall / (prec + recall))\n # print '----'\n for i, c in enumerate(c_info):\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n new_crop = union_crops(crop, this_crop)\n new_sum = covered_sum + c['sum']\n new_recall = 1.0 * new_sum / total\n new_prec = 1 - 1.0 * crop_area(new_crop) / area\n new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)\n\n # Add this crop if it improves f1 score,\n # _or_ it adds 25% of the remaining pixels for <15% crop expansion.\n # ^^^ very ad-hoc! make this smoother\n remaining_frac = c['sum'] / (total - covered_sum)\n new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1\n if new_f1 > f1 or (\n remaining_frac > 0.25 and new_area_frac < 0.15):\n print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (\n i, covered_sum, new_sum, total, remaining_frac,\n crop_area(crop), crop_area(new_crop), area, new_area_frac,\n f1, new_f1))\n crop = new_crop\n covered_sum = new_sum\n del c_info[i]\n changed = True\n break\n\n if not changed:\n break\n\n return crop", "def projection_fm(poly1, new_dim, del_dim, abs_tol=ABS_TOL):\n # Remove last dim first to handle indices\n del_dim = -np.sort(-del_dim)\n if not poly1.minrep:\n poly1 = reduce(poly1)\n poly = poly1.copy()\n for i in del_dim:\n positive = np.nonzero(poly.A[:, i] > abs_tol)[0]\n negative = np.nonzero(poly.A[:, i] < -abs_tol)[0]\n null = np.nonzero(np.abs(poly.A[:, i]) < abs_tol)[0]\n nr = len(null) + len(positive) * len(negative)\n nc = np.shape(poly.A)[0]\n C = np.zeros([nr, nc])\n A = poly.A[:, i].copy()\n row = 0\n for j in positive:\n for k in negative:\n C[row, j] = -A[k]\n C[row, k] = A[j]\n row += 1\n for j in null:\n C[row, j] = 1\n row += 1\n keep_dim = np.setdiff1d(\n range(poly.A.shape[1]),\n np.array([i]))\n poly = Polytope(\n np.dot(C, poly.A)[:, keep_dim],\n np.dot(C, poly.b))\n if not is_fulldim(poly):\n return Polytope()\n poly = reduce(poly)\n return poly", "def poly_enclose(points, color, inc=1.2, rad=0.3, lw=2):\n points = np.log(points)\n hull = ConvexHull(points)\n\n cent = np.mean(points, 0)\n pts = []\n for pt in points[hull.simplices]:\n pts.append(pt[0].tolist())\n pts.append(pt[1].tolist())\n \n pts.sort(key=lambda p: np.arctan2(p[1] - cent[1],\n p[0] - cent[0]))\n pts = pts[0::2] # Deleting duplicates\n pts.insert(len(pts), pts[0])\n \n \n verts = inc*(np.array(pts)- cent) + cent\n verts2 = np.zeros((3*verts.shape[0]-2,2))\n verts2[0::3] = verts\n verts2[1::3,:] = (1-rad)*verts[0:-1,:] + rad*verts[1:,:]\n verts2[2::3,:] = rad*verts[0:-1,:] + 
(1-rad)*verts[1:,:]\n verts2[0:-1] = verts2[1:]\n verts2[-1] = verts2[0]\n\n\n \n codes = [Path.MOVETO, Path.LINETO, Path.CURVE3,]\n for j in range(len(pts)-2):\n codes.extend([Path.CURVE3, Path.LINETO, Path.CURVE3,])\n codes.append(Path.CURVE3)\n \n \n path = Path(verts2, codes)\n patch = patches.PathPatch(path, facecolor=color, lw=0, alpha=0.2)\n edge = patches.PathPatch(path, edgecolor=color, facecolor='none', lw=lw)\n patch._path._vertices = np.exp(patch._path._vertices)\n return patch, edge", "def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)", "def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10):\n points = []\n length = 0\n step = 1.0 / precision\n factors = range(0, precision + 1)\n for i in factors:\n points.append(_getCubicPoint(i * step, pt0, pt1, pt2, pt3))\n for i in range(len(points) - 1):\n pta = points[i]\n ptb = points[i + 1]\n length += _distance(pta, ptb)\n return length", "def compute(self, *args, **kwargs):\n vertices = args[0]\n xpts = vertices[2] # z plays the 'x' part\n ypts = vertices[0] # x plays the 'y' part\n #zpts = vertices[1]\n #********************************************\n # switcharoo: using z in place of x\n # using x in place of y\n # i.e.\n #\n # y <- x\n # x <- z\n #\n qxdot = np.dot(xpts,self.localBasis[1])\n qxddot = np.dot(xpts,self.localBasis[2])\n qydot = np.dot(ypts,self.localBasis[1])\n qyddot = np.dot(ypts,self.localBasis[2])\n store = (qxdot*qyddot - qydot*qxddot)\n temp = np.sqrt(qxdot**2 + qydot**2)\n if isinstance(temp, ia):\n if temp.inf<=0:\n temp.inf = 0.\n denom = temp*((temp)**2)#**.5## #problem foud with sqrt\n #\n curvature = store/denom#((np.sqrt(qxdot*qxdot + qydot*qydot))**3.)\n return curvature", "def show_conts(cont, shape, tolerance):\n cont_image = np.zeros(shape)\n approx_image = np.zeros(shape)\n rr, cc = polygon_perimeter(cont[:, 0], cont[:, 1])\n cont_image[rr, cc] = 1\n poly_approx = approximate_polygon(cont, tolerance=tolerance)\n rra, cca = polygon_perimeter(poly_approx[:, 0], poly_approx[:, 1])\n approx_image[rra, cca] = 1\n plt.imshow(cont_image)\n plt.show()\n plt.imshow(approx_image)\n plt.show()", "def get_corners_from_contours(contours, corner_amount=4):\n\tcoefficient = .05\n\tepsilon = coefficient * cv2.arcLength(contours, True)\n\n\twhile True:\n\t\t# print(contours)\n\t\tprint(\"epsilon:\", epsilon)\n\n\t\tpoly_approx = cv2.approxPolyDP(contours, epsilon, True)\n\t\t\n\t\t#Выпуклая оболочка, описывающая точки poly_approx\n\t\thull = cv2.convexHull(poly_approx)\n\t\tif len(hull) == corner_amount:\n\t\t\treturn hull\n\t\telse:\n\t\t\tif len(hull) > corner_amount:\n\t\t\t\tcoefficient += .01\n\t\t\telse:\n\t\t\t\tcoefficient -= .01\n\t\tepsilon = coefficient * cv2.arcLength(contours, True)\n\t\tif epsilon < 0: return hull", "def restrict(self):\n\n cg = self.grid.coarse_like(2)\n\n c_edge_coeffs = EdgeCoeffs(cg, None, empty=True)\n\n c_eta_x = cg.scratch_array()\n c_eta_y = cg.scratch_array()\n\n fg = self.grid\n\n c_eta_x[cg.ilo:cg.ihi+2,cg.jlo:cg.jhi+1] = \\\n 0.5*(self.x[fg.ilo:fg.ihi+2:2,fg.jlo :fg.jhi+1:2] +\n self.x[fg.ilo:fg.ihi+2:2,fg.jlo+1:fg.jhi+1:2])\n\n # redo the normalization\n c_edge_coeffs.x 
= c_eta_x*fg.dx**2/cg.dx**2\n\n c_eta_y[cg.ilo:cg.ihi+1,cg.jlo:cg.jhi+2] = \\\n 0.5*(self.y[fg.ilo :fg.ihi+1:2,fg.jlo:fg.jhi+2:2] +\n self.y[fg.ilo+1:fg.ihi+1:2,fg.jlo:fg.jhi+2:2])\n\n c_edge_coeffs.y = c_eta_y*fg.dy**2/cg.dy**2\n\n return c_edge_coeffs", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def triangle_contour(x_center, y_center, values, smoothing, ckwargs={}):\n # make Triangulation object using the centers of each of the hexbins\n triag = Triangulation(x_center, y_center)\n refiner = UniformTriRefiner(triag) # refines the mesh of triangle\n # returns refines triangle field of triangles and interpolated\n # contour values by dividing each triangle into 4**subdiv triangles\n tri_refi, c_refi = refiner.refine_field(values, subdiv=smoothing)\n T = pl.tricontour(tri_refi, c_refi, **ckwargs)\n return T", "def isConvexApproximate(data, boundaryPointsDict, triangleDict, approximation, tolerance):\n outliersAllowed = int(np.floor(tolerance * len(list(boundaryPointsDict.keys()))))\n\n outliersCount = 0\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= approximation:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n outliersCount += 1\n if outliersCount > outliersAllowed:\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True", "def test_defect_calculation():\n slope1, slope2 = 2., 3.\n step1, step2 = Fraction(5), Fraction(7)\n cosim = ramp_cosimulation(slope1, slope2, step1, step2)\n t_end = Fraction(20)\n defect = cs.evaluate(cosim, t_end)\n\n alpha = Fraction(int(lcm(step1.numerator, step2.numerator)),\n int(gcd(step1.denominator, step2.denominator)))\n num1, num2 = tuple(map(int, [alpha / step for step in (step1, step2)]))\n big = max(num1, num2) + 1\n small = min(num1, num2) - 1\n assert defect.connection['Ramp1', 'u'] > small * slope2 * step2\n assert defect.connection['Ramp1', 'u'] < big * slope2 * step2\n assert defect.connection['Ramp2', 'u'] > small * slope1 * step1\n assert defect.connection['Ramp2', 'u'] < big * slope1 * step1\n\n assert defect.output['Ramp1', 'y'] == pytest.approx(slope1 * step1)\n assert defect.output['Ramp2', 'y'] == pytest.approx(slope2 * step2)", "def test_spheroid_convexity(spheroid_convex_fixture):\n assert(spheroid_convex_fixture.convex_p() == pytest.approx(1.0))\n 
assert(spheroid_convex_fixture.linear_p() == pytest.approx(0.0))", "def convex(self, *args, **kwargs) -> Any:\n pass", "def update_contour():\n global contour_center\n global contour_area\n\n image = rc.camera.get_color_image()\n\n if image is None:\n contour_center = None\n contour_area = 0\n else:\n # Find all of the orange contours\n contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])\n\n # Select the largest contour\n contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)\n\n if contour is not None:\n # Calculate contour information\n contour_center = rc_utils.get_contour_center(contour)\n contour_area = rc_utils.get_contour_area(contour)\n\n # Draw contour onto the image\n rc_utils.draw_contour(image, contour)\n rc_utils.draw_circle(image, contour_center)\n\n else:\n contour_center = None\n contour_area = 0\n\n # Display the image to the screen\n rc.display.show_color_image(image)", "def reCurveFromEntireInputContour(self, inputContour):\n if self.clockwise:\n inputFlat = inputContour.clockwiseFlat\n else:\n inputFlat = inputContour.counterClockwiseFlat\n outputFlat = []\n for segment in self.segments:\n # XXX this could be expensive\n assert segment.segmentType == \"flat\"\n outputFlat += segment.points\n # test lengths\n haveMatch = False\n if len(inputFlat) == len(outputFlat):\n if inputFlat == outputFlat:\n haveMatch = True\n else:\n inputStart = inputFlat[0]\n if inputStart in outputFlat:\n # there should be only one occurance of the point\n # but handle it just in case\n if outputFlat.count(inputStart) > 1:\n startIndexes = [index for index, point in enumerate(outputFlat) if point == inputStart]\n else:\n startIndexes = [outputFlat.index(inputStart)]\n # slice and dice to test possible orders\n for startIndex in startIndexes:\n test = outputFlat[startIndex:] + outputFlat[:startIndex]\n if inputFlat == test:\n haveMatch = True\n break\n if haveMatch:\n # clear out the flat points\n self.segments = []\n # replace with the appropriate points from the input\n if self.clockwise:\n inputSegments = inputContour.clockwiseSegments\n else:\n inputSegments = inputContour.counterClockwiseSegments\n for inputSegment in inputSegments:\n self.segments.append(\n OutputSegment(\n segmentType=inputSegment.segmentType,\n points=[\n OutputPoint(\n coordinates=point.coordinates,\n segmentType=point.segmentType,\n smooth=point.smooth,\n name=point.name,\n kwargs=point.kwargs\n )\n for point in inputSegment.points\n ],\n final=True\n )\n )\n inputSegment.used = True\n # reset the direction of the final contour\n self.clockwise = inputContour.clockwise\n return True\n return False", "def approx_snap_contour(contour, width, height, epsilon=20, snap_dist=5):\n # approximate contour within epsilon pixels,\n # so it isn't too fine in the corner\n # and snap to edges\n approx = cv2.approxPolyDP(contour, epsilon, True)\n for i in range(approx.shape[0]):\n for j in [0, width]:\n if np.abs(approx[i, 0, 0] - j) <= snap_dist:\n approx[i, 0, 0] = j\n for j in [0, height]:\n if np.abs(approx[i, 0, 1] - j) <= snap_dist:\n approx[i, 0, 1] = j\n return approx", "def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,\n min_height, max_height, solidity, max_vertex_count, min_vertex_count,\n min_ratio, max_ratio):\n output = []\n for contour in input_contours:\n x,y,w,h = cv2.boundingRect(contour)\n if (w < min_width or w > max_width):\n continue\n if (h < min_height or h > max_height):\n continue\n area = cv2.contourArea(contour)\n if (area < min_area):\n continue\n 
if (cv2.arcLength(contour, True) < min_perimeter):\n continue\n hull = cv2.convexHull(contour)\n solid = 100 * area / max(cv2.contourArea(hull),1)\n if (solid < solidity[0] or solid > solidity[1]):\n continue\n if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):\n continue\n ratio = (float)(w) / h\n if (ratio < min_ratio or ratio > max_ratio):\n continue\n output.append(contour)\n return output", "def find_document_contour(edged_image):\n # find contours from an edge-detected image, and choose the ones with the largest area\n contours = cv2.findContours(edged_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]\n contours = sorted(contours, key = cv2.contourArea, reverse = True)[:5]\n\n for cont in contours:\n arc_length = cv2.arcLength(cont, True)\n approx_curve = cv2.approxPolyDP(cont, 0.02 * arc_length, True)\n\n if len(approx_curve) == 4:\n print(approx_curve)\n return approx_curve\n\n raise ValueError()", "def poly2mask(self):\n self.x_gridnum = int((self.x_range[1] - self.x_range[0]) / self.x_gridsize)\n self.y_gridnum = int((self.y_range[1] - self.y_range[0]) / self.y_gridsize)\n img = Image.new(\"L\", (self.x_gridnum, self.y_gridnum), 0)\n\n self.perimeter = 0.0\n for ii in self.polygons:\n pp = np.array(ii) * self.CD # polygon\n polygonlen = len(pp)\n self.perimeter += np.sum(np.abs(pp[0:-1] - pp[1:polygonlen]))\n pp[:, 0] = (pp[:, 0] - self.x_range[0]) / self.x_gridsize\n pp[:, 1] = (pp[:, 1] - self.y_range[0]) / self.y_gridsize\n vetex_list = list(pp)\n polygon = [tuple(y) for y in vetex_list]\n ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)\n\n self.data = np.array(img)\n self.data = np.float64(self.data)\n\n self.spat_part = pyfftw.empty_aligned(\n (self.y_gridnum, self.x_gridnum), dtype=\"complex128\"\n )\n self.freq_part = pyfftw.empty_aligned(\n (self.y_gridnum, self.x_gridnum), dtype=\"complex128\"\n )\n self.fft_mask = pyfftw.FFTW(self.spat_part, self.freq_part, axes=(0, 1))", "def _arc_approximation(self, src, target, dim, tol=0.5):\n dim = round(dim)\n self.debug(\"target convexity={}\".format(target.convexity))\n tx, ty = self._get_frame_center(src)\n dx, dy = None, None\n color = (150, 100, 50)\n\n if target.convexity > tol:\n dim *= 1.1\n # self.info('doing arc approximation radius={}'.format(dim))\n pts = target.poly_points\n # pts[:, 1] = pts[:, 1] - ty\n # pts[:, 0] = pts[:, 0] - tx\n # args = approximate_polygon_center(pts, dim)\n h, w = src.shape[0], src.shape[1]\n\n pts = list(reversed(convex_hull(pts)))\n draw_polygons(src, [pts], color=(0, 200, 0), thickness=1)\n\n args = approximate_polygon_center3(pts, dim, w, h)\n if args:\n cx, cy, cpts = args\n self.debug(\n \"arc_approximation cx={}, cy={}, cpts={}\".format(cx, cy, cpts)\n )\n\n if (\n cx is not None\n and not isnan(cx)\n and cy is not None\n and not isnan(cy)\n ):\n for cpt in cpts:\n self._draw_indicator(src, cpt, size=1)\n\n dx = cx - tx\n dy = cy - ty\n\n dy = -dy\n color = (255, 0, 128)\n else:\n self.debug(\"arc approximation failed\")\n dx, dy = self._calculate_error([target])\n cx, cy = dx + tx, -dy + ty\n else:\n self.debug(\"target convexity too low\")\n dx, dy = self._calculate_error([target])\n cx, cy = dx + tx, dy + ty\n\n if target.convexity > tol and dx is not None and dy is not None:\n draw_circle_perimeter(src, cx, cy, dim, color)\n self._draw_indicator(\n src,\n (cx, cy),\n color=color,\n shape=\"crosshairs\",\n size=round(dim),\n thickness=1,\n )\n\n return dx, dy", "def ricciCurvature_singleEdge(G, source, target, alpha, length):\n\n EPSILON = 1e-7 # to 
prevent divided by zero\n\n assert source != target, \"Self loop is not allowed.\" # to prevent self loop\n\n # If the weight of edge is too small, return the previous Ricci Curvature instead.\n if length[source][target] < EPSILON:\n assert \"ricciCurvature\" in G[source][target], \"Divided by Zero and no ricci curvature exist in Graph!\"\n print(\"Zero Weight edge detected, return previous ricci Curvature instead.\")\n return G[source][target][\"ricciCurvature\"]\n\n source_nbr = list(G.neighbors(source))\n target_nbr = list(G.neighbors(target))\n\n assert len(source_nbr) > 0, \"srcNbr=0?\"\n assert len(target_nbr) > 0, \"tarNbr=0?\" + str(source) + \" \" + str(target)\n x = [(1.0 - alpha) / len(source_nbr)] * len(source_nbr)\n y = [(1.0 - alpha) / len(target_nbr)] * len(target_nbr)\n\n source_nbr.append(source)\n target_nbr.append(target)\n x.append(alpha)\n y.append(alpha)\n\n # construct the cost dictionary from x to y\n d = np.zeros((len(x), len(y)))\n\n for i, s in enumerate(source_nbr):\n for j, t in enumerate(target_nbr):\n assert t in length[s], \"Target node not in list, should not happened, pair (%d, %d)\" % (s, t)\n d[i][j] = length[s][t]\n\n x = np.array([x]).T # the mass that source neighborhood initially owned\n y = np.array([y]).T # the mass that target neighborhood needs to received\n\n t0 = time.time()\n rho = cvx.Variable(len(target_nbr), len(source_nbr)) # the transportation plan rho\n\n # objective function d(x,y) * rho * x, need to do element-wise multiply here\n obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(np.multiply(d.T, x.T), rho)))\n\n # \\sigma_i rho_{ij}=[1,1,...,1]\n source_sum = cvx.sum_entries(rho, axis=0)\n constrains = [rho * x == y, source_sum == np.ones((1, (len(source_nbr)))), 0 <= rho, rho <= 1]\n prob = cvx.Problem(obj, constrains)\n\n m = prob.solve() # change solver here if you want\n # print(time.time() - t0, \" secs for cvxpy.\",)\n\n result = 1 - (m / length[source][target]) # divided by the length of d(i, j)\n # print(\"#source_nbr: %d, #target_nbr: %d, Ricci curvature = %f \"%(len(source_nbr), len(target_nbr), result))\n\n return result", "def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,\n min_height, max_height, solidity, max_vertex_count, min_vertex_count,\n min_ratio, max_ratio):\n output = []\n for contour in input_contours:\n x, y, w, h = cv2.boundingRect(contour)\n if (w < min_width or w > max_width):\n continue\n if (h < min_height or h > max_height):\n continue\n area = cv2.contourArea(contour)\n if (area < min_area):\n continue\n if (cv2.arcLength(contour, True) < min_perimeter):\n continue\n hull = cv2.convexHull(contour)\n solid = 100 * area / cv2.contourArea(hull)\n if (solid < solidity[0] or solid > solidity[1]):\n continue\n if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):\n continue\n ratio = (float)(w) / h\n if (ratio < min_ratio or ratio > max_ratio):\n continue\n output.append(contour)\n return output", "def process(self):\n contours, hierachy = cv.findContours(\n self.input_image,\n cv.RETR_LIST,\n cv.CHAIN_APPROX_SIMPLE,\n )\n contours = sorted(contours, key=cv.contourArea, reverse=True)\n target = None\n for c in contours:\n path = cv.arcLength(c, True)\n approx = cv.approxPolyDP(c, 0.1*path, True)\n if len(approx) == 4:\n # print(approx)\n target = approx\n break\n if target is not None:\n \"\"\"\n cv.drawContours(\n self.original_image,\n [target],\n 0,\n (255, 255, 255),\n 5\n )\n \"\"\"\n self.output_image = copy.deepcopy(self.original_image)\n for p in 
target:\n cv.circle(self.output_image, tuple(p[0]), 4, (255, 255, 255), 5)\n return (self.output_image, target)", "def decomposing_poly_cut_by_set_op(P, v, w, epsilon=10e-2):\n\n\n\tgetcontext().prec = 28\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_poly_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_poly_cut_as_line: W not on chain\")\n\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\t# Generate pairs of v and w modified by some epsilon amount \n\tv_l_displacements = [distance_to_v+(i*epsilon) for i in [-1, -2, 0]]\n\tv_r_displacements = [(distance_to_v+(i*epsilon))%chain.length for i in [1, 0, 2]]\n\tw_l_displacements = [distance_to_w+(i*epsilon) for i in [-1, -2, 0]]\n\tw_r_displacements = [(distance_to_w+(i*epsilon))%chain.length for i in [1, 0, 2]]\n\n\tdef splice_polygon(dist_v, dist_w):\n\t\t\"\"\"Portion of decomposing_line_cut_by_splicing wihtout points\n\n\t\tFunction for evaluating validity of candidates\n\t\t\"\"\"\n\n\t\tif dist_w >= chain.length or dist_w == 0:\n\n\t\t\tleft_chain, right_chain = cut_linestring(chain, dist_v)\n\n\t\t\tp_l = left_chain.coords[:]\n\t\t\tp_r = right_chain.coords[:]\t\t\n\n\t\t\treturn p_l, p_r\n\n\t\tif dist_v >= chain.length or dist_v == 0:\n\n\t\t\tleft_chain, right_chain = cut_linestring(chain, dist_w)\n\n\t\t\tp_l = right_chain.coords[:]\n\t\t\tp_r = left_chain.coords[:]\t\t\n\n\t\t\treturn p_l, p_r\n\n\n\t\tif dist_w%chain.length > dist_v%chain.length:\n\n\t\t\tleft_v_chain, right_v_chain = cut_linestring(chain, dist_v)\n\t\t\tleft_w_chain, right_w_chain = cut_linestring(chain, dist_w)\n\n\t\t\tcommon = LineString(left_w_chain).difference(LineString(left_v_chain))\n\n\t\t\tp_l = left_v_chain.coords[:]+right_w_chain.coords[:-1]\n\t\t\tp_r = common.coords[:]\n\n\t\t\treturn p_l, p_r\n\n\t\telse:\n\n\t\t\tleft_v_chain, right_v_chain = cut_linestring(chain, dist_v)\n\t\t\tleft_w_chain, right_w_chain = cut_linestring(chain, dist_w)\n\n\t\t\tcommon = LineString(left_v_chain).difference(LineString(left_w_chain))\n\n\t\t\tp_l = common.coords[:]\n\t\t\tp_r = left_w_chain.coords[:]+right_v_chain.coords[:-1]\n\n\t\t\treturn p_l, p_r\n\n\t# Check every ring for self-intersection, if cut is invalid => self-intersec\n\tfound = False\n\tfor i in range(len(v_l_displacements)):\n\t\tfor j in range(len(w_l_displacements)):\n\n\t\t\t# Check if resultant polygons are valid\n\t\t\tp_l, p_r = splice_polygon(v_l_displacements[i], w_r_displacements[j])\n\t\t\tp_l_lr = LinearRing(p_l+[p_l[0]])\n\t\t\tp_r_lr = LinearRing(p_r+[p_r[0]])\n\n\t\t\tif not p_l_lr.is_valid or not p_r_lr.is_valid:\n\t\t\t\tcontinue\n\n\t\t\tp_l, p_r = splice_polygon(v_r_displacements[i], w_l_displacements[j])\n\t\t\tp_l_lr = LinearRing(p_l+[p_l[0]])\n\t\t\tp_r_lr = LinearRing(p_r+[p_r[0]])\n\n\t\t\tif not p_l_lr.is_valid or not p_r_lr.is_valid:\n\t\t\t\tcontinue\n\n\t\t\t# Else, we have a valid candidate cut\n\t\t\tfound = True\n\t\t\tbreak\n\n\t\tif found:\n\t\t\tbreak\n\n\tif not found:\n\t\tprint(\"splice_polygon: No correct cut combination found!\")\n\t\treturn\n\n\tv_l = chain.interpolate(v_l_displacements[i]).coords[:]\n\tv_r = chain.interpolate(v_r_displacements[i]).coords[:]\n\tw_l = chain.interpolate(w_l_displacements[j]).coords[:]\n\tw_r = 
chain.interpolate(w_r_displacements[j]).coords[:]\n\n\tdef get_verts(v_l, v_r):\n\t\t\"\"\"Function for extraction verts between two points\n\t\t\"\"\"\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points\n\n\t# Find all vertecies of the chain betwee v_l and v_r\n\tv_pts = get_verts(v_l_displacements[i], v_r_displacements[i])\n\tw_pts = get_verts(w_l_displacements[j], w_r_displacements[j])\n\n\tpoly = Polygon(*P)\n\n\tcut_poly = Polygon(v_l+v_pts+v_r+w_l+w_pts+w_r)\n\tcut_poly = poly.intersection(cut_poly)\n\n\tprint cut_poly\n\n\tp_l, p_r = poly.difference(cut_poly)\n\n\tprint p_l\n\tprint p_r", "def polynomial_triangulation(u1, P1, u2, P2):\n P1_full = np.eye(4)\n P1_full[0:3, :] = P1[0:3, :] # convert to 4x4\n P2_full = np.eye(4)\n P2_full[0:3, :] = P2[0:3, :] # convert to 4x4\n P_canon = P2_full.dot(cv2.invert(P1_full)[1]) # find canonical P which satisfies P2 = P_canon * P1\n\n # \"F = [t]_cross * R\" [HZ 9.2.4]; transpose is needed for numpy\n F = np.cross(P_canon[0:3, 3], P_canon[0:3, 0:3], axisb=0).T\n\n # Other way of calculating \"F\" [HZ (9.2)]\n # op1 = (P2[0:3, 3:4] - P2[0:3, 0:3] .dot (cv2.invert(P1[0:3, 0:3])[1]) .dot (P1[0:3, 3:4]))\n # op2 = P2[0:3, 0:4] .dot (cv2.invert(P1_full)[1][0:4, 0:3])\n # F = np.cross(op1.reshape(-1), op2, axisb=0).T\n\n # Project 2D matches to closest pair of epipolar lines\n u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))\n\n # For a purely sideways trajectory of 2nd cam, correctMatches() returns NaN for all possible points!\n if np.isnan(u1_new).all() or np.isnan(u2_new).all():\n F = cv2.findFundamentalMat(u1, u2, cv2.FM_8POINT)[0] # so use a noisy version of the fund mat\n u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))\n\n # Triangulate using the refined image points\n return linear_eigen_triangulation(u1_new[0], P1, u2_new[0], P2)", "def _bounding_precision(self) :\n if not self.precision().is_infinite() :\n return self.precision()\n \n coeffs = self.coefficients(True)\n m = self.parent().action().zero_filter()\n for c in self.non_zero_components() :\n m = max(m, self.parent().action().minimal_composition_filter( coeffs[c].keys(),\n [self.parent().action().zero_element()] ))\n return m", "def discretize(self, obstacle_collection, drone_poses, goal_poses):\n cyl_c1_list = []\n cyl_c2_list = []\n cyl_r_list = []\n cyl_dir_list = []\n for obs in obstacle_collection.obstacles:\n if isinstance(obs, Cylinder):\n if obs.axis == 'x':\n cyl_dir_list.append(0)\n cyl_c1_list.append(obs.position.y + abs(MIN_Y))\n cyl_c2_list.append(obs.position.z)\n elif obs.axis == 'y':\n cyl_dir_list.append(1)\n cyl_c1_list.append(obs.position.x + abs(MIN_X))\n cyl_c2_list.append(obs.position.z)\n elif obs.axis == 'z':\n cyl_dir_list.append(2)\n cyl_c1_list.append(obs.position.x + abs(MIN_X))\n cyl_c2_list.append(obs.position.y + abs(MIN_Y))\n\n cyl_r_list.append(obs.radius)\n\n x_dim = MAX_X - MIN_X\n 
y_dim = MAX_Y - MIN_Y\n z_dim = MAX_Z\n mesh_nodes = mesh.generateMesh([x_dim, y_dim, z_dim],\n cyl_c1_list, cyl_c2_list, cyl_r_list, cyl_dir_list,\n self.__angle, self.__size, self.__approximation,\n self.__radiusedge, self.__ratio)\n for _ in mesh_nodes:\n self.__nodes.append(MeshNode())\n for n in mesh_nodes:\n self.__nodes[n.index].x = n.x - abs(MIN_X)\n self.__nodes[n.index].y = n.y - abs(MIN_Y)\n self.__nodes[n.index].z = n.z\n for edge in n.adj:\n self.__nodes[n.index].add_edge(self.__nodes[edge])\n\n def add_node(n):\n for node in self.__nodes:\n # Only short edges that are not on the ground or through obstacles\n if n.dist(node) < MESH_EDGE_DIST and (n.z > DRONE_HEIGHT or node.z > DRONE_HEIGHT):\n valid_edge = True\n for obs in obstacle_collection.obstacles:\n if isinstance(obs, Cylinder):\n cylinder_seg = Segment(Point(obs.position), Point(obs.position))\n if obs.axis == 'x':\n cylinder_seg.b += Point(1000, 0, 0)\n elif obs.axis == 'y':\n cylinder_seg.b += Point(0, 1000, 0)\n else:\n cylinder_seg.b += Point(0, 0, 1000)\n edge_seg = Segment(Point(n.position()), Point(node.position()))\n\n if cylinder_seg.min_distance(edge_seg) < obs.radius:\n valid_edge = False\n if valid_edge:\n n.add_edge(node)\n\n # Adding drone nodes\n drone_nodes = {}\n if drone_poses is not None:\n for drone_id in drone_poses:\n p = drone_poses[drone_id].position\n drone_nodes[drone_id] = MeshNode(p.x, p.y, p.z)\n add_node(drone_nodes[drone_id])\n\n # Adding goal nodes\n goal_nodes = {}\n if goal_poses is not None:\n for drone_id in goal_poses:\n p = goal_poses[drone_id]\n goal_nodes[drone_id] = MeshNode(p.x, p.y, p.z)\n add_node(goal_nodes[drone_id])\n\n return drone_nodes, goal_nodes", "def get_Curvature(self):\n #return str(np.mean([self.line_l.get_CurveRad(), self.line_r.get_CurveRad()]))\n y = np.linspace(0,719, 10)\n x = self.center_poly(y)\n fit_scaled = np.polyfit(y*self.line_l.y_pxm,x*self.line_l.x_pxm, deg=2)\n curverad = ((1 + (2 * fit_scaled[0] * 600 + fit_scaled[1]) ** 2) ** 1.5) / np.absolute(2 * fit_scaled[0])\n\n if len(self.curve_buffer) > 15:\n self.curve_buffer.pop(0)\n\n self.curve_buffer.append(curverad)\n _, self.curve_buffer = self.line_l.remove_outliers(self.curve_buffer,[None]*len(self.curve_buffer), m=3)\n buff_mean= np.mean(self.curve_buffer)\n #print(\"Buf Mean: \" +str(buff_mean))\n #outlier = np.abs(buff_mean - curverad) > np.std(self.curve_buffer)*2\n if curverad > 4000.0:\n buff_mean = \"Straight Lane\"\n else:\n buff_mean = str(int(buff_mean)) + \" m\"\n\n return buff_mean", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * 
self.cl", "def to_prec(self, prec):\n return ComplexField(prec)", "def RemovePolygonHoles_management(in_fc, threshold=0.0):\n desc = arcpy.Describe(in_fc)\n if desc.dataType != \"FeatureClass\" and desc.dataType != \"ShapeFile\":\n print(\"Invalid data type. The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n else:\n if desc.shapeType != \"Polygon\":\n print(\"The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n if threshold < 0.0:\n threshold = 0.0\n with arcpy.da.UpdateCursor(in_fc, [\"SHAPE@\"]) as updateCursor:\n for updateRow in updateCursor:\n shape = updateRow[0]\n new_shape = arcpy.Array()\n for part in shape:\n new_part = arcpy.Array()\n if threshold > 0:\n # find None point in shape part\n # in arcpy module, a None point is used to seperate exterior and interior vertices\n null_point_index = []\n for i in range(len(part)):\n if part[i] is None:\n null_point_index.append(i)\n # if interior vertices exist, create polygons and compare polygon shape area to given threshold\n # if larger, keep vertices, else, dismiss them\n if len(null_point_index) > 0:\n for k in range(0, null_point_index[0]):\n new_part.add(part[k])\n for i in range(len(null_point_index)):\n pointArray = arcpy.Array()\n # determine if the None point is the last one\n if i+1 < len(null_point_index):\n for j in range(null_point_index[i] + 1, null_point_index[i+1]):\n pointArray.add(part[j])\n else:\n for j in range(null_point_index[i] + 1, len(part)):\n pointArray.add(part[j])\n # create a polygon to check shape area against the given threshold\n inner_poly = arcpy.Polygon(pointArray)\n # if larger than threshold, then add to the new part Array\n if inner_poly.area > threshold:\n if i+1 < len(null_point_index):\n for k in range(null_point_index[i], null_point_index[i+1]):\n new_part.add(part[k])\n else:\n for k in range(null_point_index[i], len(part)):\n new_part.add(part[k])\n new_shape.add(new_part)\n # if interior does not exist, add the whole part\n else:\n new_shape.add(part)\n else:\n # get the first None point index\n first_null_point_index = 0\n for i in range(len(part)):\n if part[i] is None:\n first_null_point_index = i\n break\n if first_null_point_index == 0:\n new_shape.add(part)\n else:\n for j in range(first_null_point_index):\n new_part.add(part[j])\n new_shape.add(new_part)\n if len(new_shape) > 0:\n new_poly = arcpy.Polygon(new_shape)\n updateRow[0] = new_poly\n updateCursor.updateRow(updateRow)", "def detect(self, contour):\n # Initialize the shape name and get the shape perimeter.\n shape = \"unidentified\"\n perimeter = cv2.arcLength(contour, True)\n\n # Approximate the contour to 'smooth' the shape. 
Perimeter of appoximation can be up to 4% different.\n approx = cv2.approxPolyDP(contour, 0.03 * perimeter, True)\n\n # len() will give the number of vertices of the shape.\n if len(approx) == 3:\n shape = \"triangle\"\n\n # Check if the sides are all equal for special case of square.\n elif len(approx) == 4:\n # Compute the bounding box of the contour and use the bounding box to compute the aspect ratio.\n (x, y, w, h) = cv2.boundingRect(approx)\n aspect_ratio = w / float(h)\n # A square will have an aspect ratio that is close to 1, otherwise, the shape is a rectangle.\n shape = \"square\" if aspect_ratio >= 0.95 and aspect_ratio <= 1.05 else \"rectangle\"\n\n elif len(approx) == 5:\n shape = \"pentagon\"\n\n # Otherwise assume the shape is a circle.\n else:\n shape = \"circle\"\n\n return shape", "def precision_reduce(self, precision):\n return _unary_geo(arctern.ST_PrecisionReduce, self, precision)", "def make_mask(shape, contour):\n mask = np.zeros(shape, np.int32)\n cv2.drawContours(mask, [contour], 0, (255), -1)\n return mask", "def Frac_Cov(c, N, K):\n return (((c+N+(55.5/K))-np.sqrt(np.square(c+N+(55.5/K))-(4*c*N)))/(2*N));", "def test_lineclip():\n # %% LOWER to UPPER test\n x1, y1, x2, y2 = plc.cohensutherland(1, 5, 4, 3,\n 0, 0, 4, 6)\n\n assert [x1, y1, x2, y2] == approx([2, 3, 3.3333333333333, 5])\n # %% no intersection test\n x1, y1, x2, y2 = plc.cohensutherland(1, 5, 4, 3,\n 0, 0.1, 0, 0.1)\n\n assert x1 is None and y1 is None and x2 is None and y2 is None\n # %% left to right test\n x1, y1, x2, y2 = plc.cohensutherland(1, 5, 4, 3,\n 0, 4, 5, 4)\n\n assert [x1, y1, x2, y2] == [1, 4, 4, 4]", "def RecursiveSubdivideCurveIfNeeded(context, CurveObj, nDesiredVertNum):\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.context.scene.objects.active = CurveObj\n\n CurvePoints = [(vert.x, vert.y, vert.z) for vert in [p.co for p in CurveObj.data.splines[0].points]]\n \n if len(CurvePoints) < nDesiredVertNum:\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.curve.select_all(action = 'SELECT')\n bpy.ops.curve.subdivide()\n return RecursiveSubdivideCurveIfNeeded(context, CurveObj, nDesiredVertNum)\n else:\n bpy.ops.object.mode_set(mode='OBJECT')\n return CurveObj", "def cubicTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def setContourMode(interval, quotient):\n dislin.conmod(interval, quotient)", "def convgridone(a, pi, fi, gcf, v):\n \tsx, sy= gcf[0][0].shape[0]/2, gcf[0][0].shape[1]/2", "def resimplify_Curve(curve):\r\n\r\n curve=scriptcontext.doc.Objects.Add(curve)\r\n vertices = rs.PolylineVertices(curve)\r\n best_candidate=curve\r\n best_v_len = len(vertices)\r\n\r\n for i in range(len(vertices)):\r\n new_candidate = rs.CopyObject(curve)\r\n rs.CurveSeam(new_candidate, rs.CurveClosestPoint(new_candidate,vertices[i]))\r\n rs.SimplifyCurve(new_candidate)\r\n v_len = len(rs.PolylineVertices(new_candidate))\r\n if v_len < best_v_len:\r\n best_candidate = rs.CopyObject(new_candidate)\r\n best_v_len = v_len\r\n return rs.coercecurve(best_candidate)", "def detect_shape(contour):\n # Initialize the shape name and approximate the contour\n shape = \"unidentified\"\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.04 * peri, True)\n if len(approx) == 3:\n shape = \"triangle\"\n if len(approx) == 4:\n # compute the bounding box of the contour and use the\n # bounding box to compute the aspect ratio\n (x, y, w, h) = cv2.boundingRect(approx)\n ar = w / float(h)\n # a square will have an aspect ratio that is 
approximately\n # equal to one, otherwise, the shape is a rectangle\n shape = \"square\" if 0.95 <= ar <= 1.05 else \"rectangle\"\n elif len(approx) == 5:\n shape = \"pentagon\"\n else:\n shape = \"circle\"\n return shape", "def simplify_line_vw(points, small_area=100):\r\n while len(points) > 3:\r\n \r\n # For each coordinate that forms the apex of a two-segment\r\n # triangle, find the area of that triangle and put it into a list\r\n # along with the index, ordered from smallest to largest.\r\n \r\n popped, preserved = set(), set()\r\n \r\n triples = zip(points[:-2], points[1:-1], points[2:])\r\n triangles = [Polygon((p1, p2, p3)) for (p1, p2, p3) in triples]\r\n areas = [(triangle.area, index) for (index, triangle) in enumerate(triangles)]\r\n \r\n # Reduce any segments that makes a triangle whose area is below\r\n # the minimum threshold, starting with the smallest and working up.\r\n # Mark segments to be preserved until the next iteration.\r\n\r\n for (area, index) in sorted(areas):\r\n if area > small_area:\r\n # nothing more can be removed on this iteration\r\n break\r\n \r\n if (index + 1) in preserved:\r\n # current index is too close to a previously-preserved one\r\n continue\r\n \r\n preserved.add(index)\r\n popped.add(index + 1)\r\n preserved.add(index + 2)\r\n \r\n if not popped:\r\n # nothing was removed so we are done\r\n break\r\n \r\n # reduce the line, then try again\r\n points = [point for (index, point) in enumerate(points) if index not in popped]\r\n \r\n return list(points)", "def contours(info, color, line, mean_marker):\n\teigenval, eigenvec = np.linalg.eigh(info['covar'])\n\n\taxis11, axis12 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 1)\n\taxis21, axis22 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 2)\n\taxis31, axis32 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 3)\n\tangle = axis12['xangle']\t\n\tangle = angle * 180 / math.pi\n\n\tellipse1 = Ellipse(xy=info['mean'], width=axis11['length'], height=axis12['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\tellipse2 = Ellipse(xy=info['mean'], width=axis21['length'], height=axis22['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\tellipse3 = Ellipse(xy=info['mean'], width=axis31['length'], height=axis32['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\n\tax = plt.gca()\n\tax.add_patch(ellipse3)\n\tax.add_patch(ellipse2)\n\tax.add_patch(ellipse1)\n\tax.set_xlim(-0.4, 0.4)\n\tax.set_ylim(0.5, 2.0)\n\tplt.plot(info['mean'][0], info['mean'][1], marker=mean_marker, mfc='none', mec=color, markersize=8, mew=2)\n\tsigma1 = {'ax1':axis11['length'], 'ax2':axis12['length'], 'xangle1':axis11['xangle'], 'xangle2':axis12['xangle']}\n\tsigma2= {'ax1':axis21['length'], 'ax2':axis22['length'], 'xangle1':axis21['xangle'], 'xangle2':axis22['xangle']}\n\tsigma3 = {'ax1':axis31['length'], 'ax2':axis32['length'], 'xangle1':axis31['xangle'], 'xangle2':axis32['xangle']}\n\n\treturn sigma1, sigma2, sigma3", "def Draw1D(mesh, coefs, keep=False, n_p=2, figsize=(20,4)):\n if n_p <= 2:\n n_p = 2\n \n eps = 1e-6 \n \n x_v = [p[0] for p in mesh.ngmesh.Points()]\n x_s = []\n f_s = {}\n\n miny = 1e99\n for f, name in coefs:\n f_s[name] = []\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n \n for el in mesh.ngmesh.Elements1D():\n left = mesh.ngmesh.Points()[el.points[0]][0]\n right = 
mesh.ngmesh.Points()[el.points[1]][0]\n for l in range(n_p):\n y = left + eps + (l / (n_p-1)) * (right - eps -left) \n x_s.append(y)\n for f,name in coefs:\n ff = f(mesh(y))\n miny = min(miny,ff)\n f_s[name].append(ff)\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n\n \n # plt.clf()\n # display.display(plt.gcf())\n plt.figure(figsize=figsize)\n for f,name in coefs:\n plt.plot(x_s,f_s[name],label=name)\n plt.plot(x_v,[miny for v in x_v],'|',label='vertices')\n plt.xlabel(\"x\")\n plt.legend()\n plt.show()\n if keep:\n display.clear_output(wait=True)", "def getContours(img):\n # mode: gets only exrteme outer contours\n # method: stores all contour points\n contours, hierarchy = cv2.findContours(img, mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_NONE)\n \n x, y, w, h = 0, 0, 0, 0\n\n # improving accuracy of contours\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 500:\n # assumes cnt with area > 500 is closed contour \n perimeter = cv2.arcLength(cnt, True)\n\n # approximate polygonal curve of cnt\n # set max difference between original and approxCurve as \n # 0.02 * perimeter\n approxCurve = cv2.approxPolyDP(cnt, 0.02 * perimeter, closed=True)\n \n x, y, w, h = cv2.boundingRect(approxCurve)\n # wand point is is center-left tip\n return x + (w // 2), y", "def GetConcBeer(Abs, epsilon, pathLength):\n return Abs / (epsilon * pathLength)", "def compute_contour_coverage(\n contoursA: ContourList,\n contoursB: ContourList,\n max_size: int = DEFAULT_MAX_CONTOUR_MASK_SIZE,\n):\n arr = np.zeros((len(contoursA), len(contoursB)), dtype=\"float32\")\n box_coverage = compute_coverage(\n *[\n np.array(\n [\n np.concatenate([contour.min(axis=0), contour.max(axis=0)])\n for contour in contours\n ]\n )\n for contours in [contoursA, contoursB]\n ]\n )\n for idx1, contour1 in enumerate(contoursA):\n for idx2, contour2 in enumerate(contoursB):\n if box_coverage[idx1, idx2] == 0:\n arr[idx1, idx2] = 0\n continue\n arr[idx1, idx2] = compute_coverage_for_contour_pair(\n contour1, contour2, max_size=max_size\n )\n return arr", "def drawShapes(contours, realImg, minArea=500, name=False):\n\n # Getting Shape of Real Image\n realH, realW, _ = realImg.shape\n\n thickness = realW // 275 # Setting Thickness\n fontScale = realH / 1000 # Setting fontScale\n\n if not contours:\n print(\"No Contours Found\")\n for cnt in contours:\n cntArea = cv2.contourArea(cnt) # Getting Area\n # i = -1\n # i += 1\n # color = (0, 255, 0)\n\n if cntArea < minArea:\n continue\n\n # Drawing contours\n # cv2.drawContours(realImg, [cnt], i, color, 3)\n\n # Finding Perimeters of each cnt\n peri = cv2.arcLength(cnt, True) # True for Closed Shapes\n approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)\n\n # Getting x, y co-ordinates and width, height of each cnt\n x, y, w, h = cv2.boundingRect(approx)\n\n # Making Black Rectangle around each contour\n cv2.rectangle(realImg,\n (x - thickness, y - thickness),\n (x + w + thickness, y + h + thickness),\n (0, 0, 0), thickness)\n\n # Making White Rectangle around each contour\n cv2.rectangle(realImg, (x, y), (x + w, y + h),\n (255, 255, 255), thickness // 2)\n\n if not name:\n continue\n\n corners = len(approx) # Counting Corners\n objType = None\n\n if corners == 3:\n objType = \"Triangle\"\n\n elif corners == 4:\n aspRatio = w / float(h)\n if aspRatio > 0.9 and aspRatio < 1.1:\n objType = \"Square\"\n else:\n objType = \"Rectangle\"\n\n elif corners == 5:\n objType = \"Pentagon\"\n\n elif corners == 6:\n objType = \"Hexagon\"\n\n elif corners > 7:\n if 
detectObject.isCircle(cnt, realImg):\n objType = \"Circle\"\n\n if objType is None:\n objType = \"Not Found\"\n\n if thickness == 1:\n thickness = 2\n\n # Setting FontFace\n fontFace = cv2.FONT_HERSHEY_DUPLEX\n\n # Getting Shape of Image\n h2, _, _ = realImg.shape\n\n # If Image Height is too Small, increase fontScale\n if h2 < 300:\n fontScale += 0.04\n elif h2 < 400:\n fontScale += 0.02\n\n textBgHeight = int(fontScale * 40)\n k = 0\n \n if h2 > 500:\n k = textBgHeight // 3\n\n # Copying the contents where we want to write text\n sub_img = realImg[y+h: y+h+textBgHeight+k, x:x+w]\n\n # Create a new black image with same shape\n rect = np.zeros(sub_img.shape, dtype=np.uint8)\n\n # Creating a new transparent image from\n # `copied image(sub_img)` and `black image`\n res = cv2.addWeighted(sub_img, .25, rect, .5, 1.0)\n\n # Replacing the part of realImg\n realImg[y+h: y+h+textBgHeight+k, x: x+w] = res\n\n # Putting White Text on Image\n cv2.putText(realImg, objType, (x, y + h + textBgHeight - 5),\n fontFace, fontScale, (255, 255, 255), thickness * 2)\n\n # # Putting Black Text on Image\n cv2.putText(realImg, objType, (x, y + h + textBgHeight - 5),\n fontFace, fontScale, (0, 0, 0), thickness // 2)", "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def getCV(file1, type, imgOri,iteration):\n del s_arrays[:]\n del shapesContours[:]\n\n if type == 'shapes':\n spContours = getContours(imgOri,iteration)\n file1.write(str(len(spContours)) + '\\n')\n print(len(spContours))\n spContours = clean_Con(spContours)\n file1.write(str(len(spContours)) + '\\n')\n print(len(spContours))\n cv2.drawContours(imgOri, spContours, -1, (0, 255, 128), 5)\n # del s_arrays[:]\n # for each shape\n for cons in spContours:\n sampleComVector = []\n x, y, w, h = cv2.boundingRect(cons)\n cv2.rectangle(imgOri, (x, y), (x + w, y + h), (100, 100, 100), 1)\n\n # move the points to center\n for point in cons:\n sampleComVector.append(complex(point[0][0] - x, (point[0][1] - y)))\n # sampleComVectors store CV of all testees contours\n s_arrays.append(sampleComVector)\n # sampleContours store all testees contours, same order with sampleComVectors\n shapesContours.append(cons)\n\n elif type == 'temp':\n # Automatically find templete contour\n templetTrue = imgOri\n tpContour = getContours(templetTrue,iteration)\n for contour in tpContour:\n x, y, w, h = cv2.boundingRect(contour)\n #\n for point in contour:\n # -x and -y are to make left and upper boundry start from 0\n t_array.append(complex(point[0][0] - x, (point[0][1] - y)))", "def test_intersect_volume(self):\n\n intersect_shape = ExtrudeCircleShape(points=[(30, 0)], radius=5, distance=50)\n\n intersected_shape = ExtrudeCircleShape(\n points=[(30, 0)],\n radius=10,\n distance=50,\n intersect=[self.test_shape, intersect_shape],\n )\n\n assert intersected_shape.volume() == pytest.approx(math.pi * 5**2 * 30)", "def reduce(poly, nonEmptyBounded=1, abs_tol=ABS_TOL):\n if isinstance(poly, Region):\n lst = []\n for poly2 in poly.list_poly:\n red = reduce(poly2)\n if is_fulldim(red):\n lst.append(red)\n if len(lst) > 0:\n return Region(lst, poly.props)\n else:\n return Polytope()\n # is `poly` already in minimal representation 
?\n if poly.minrep:\n return poly\n if not is_fulldim(poly):\n return Polytope()\n # `poly` isn't flat\n A_arr = poly.A\n b_arr = poly.b\n # Remove rows with b = inf\n keep_row = np.nonzero(poly.b != np.inf)\n A_arr = A_arr[keep_row]\n b_arr = b_arr[keep_row]\n neq = np.shape(A_arr)[0]\n # first eliminate the linearly dependent rows\n # corresponding to the same hyperplane\n # Normalize all rows\n a_norm = 1 / np.sqrt(np.sum(A_arr.T**2, 0))\n a_normed = np.dot(A_arr.T, np.diag(a_norm)).T\n remove_row = []\n for i in range(neq):\n for j in range(i + 1, neq):\n # If the product of two vectors are close to 1,\n # since they are both unit vectors,\n # they must represent parallel hyperplanes\n if np.dot(a_normed[i].T, a_normed[j]) > 1 - abs_tol:\n # Check which inequality that constrains the most\n b_in = b_arr[i] * a_norm[i]\n b_jn = b_arr[j] * a_norm[j]\n if b_in < b_jn:\n remove_row.append(j)\n else:\n remove_row.append(i)\n keep_row = np.setdiff1d(range(neq), remove_row).tolist()\n A_arr = A_arr[keep_row]\n b_arr = b_arr[keep_row]\n neq, nx = A_arr.shape\n if nonEmptyBounded:\n if neq <= nx + 1:\n return Polytope(A_arr, b_arr)\n # Now eliminate hyperplanes outside the bounding box\n if neq > 3 * nx:\n lb, ub = Polytope(A_arr, b_arr).bounding_box\n # Do a coordinate system translation such that the lower bound is\n # moved to the origin\n # A*(x-lb) <= b - A*lb\n # Relative to the origin, a row ai in A with only positive coefficients\n # represents an upper bound. If ai*(x1-lb) <= bi,\n # the hyperplane is above x1.\n # Hence, if ai*(ub-lb) <= bi, then the hyperplane at row i\n # does not intersect the bounding box.\n # The same holds for rows with negative coefficients multiplied with\n # the origin. Rows with both negative and positive coefficients\n # are a mixture of the two extremes.\n cand = ~ (np.dot((A_arr > 0) * A_arr, ub - lb) -\n (np.array([b_arr]).T - np.dot(A_arr, lb)) < -1e-4)\n A_arr = A_arr[cand.squeeze()]\n b_arr = b_arr[cand.squeeze()]\n neq, nx = A_arr.shape\n if nonEmptyBounded:\n if neq <= nx + 1:\n return Polytope(A_arr, b_arr)\n # Check for each inequality whether it is implied by\n # the other inequalities, i.e., is it redundant?\n del keep_row[:]\n for k in range(neq):\n # Setup object function to maximize the linear function\n # defined as current row of A matrix\n f = -A_arr[k, :]\n G = A_arr\n h = b_arr\n # Give some slack in the current inequality\n h[k] += 0.1\n sol = lpsolve(f, G, h)\n h[k] -= 0.1\n if sol['status'] == 0:\n # If the maximum is greater than the constraint of\n # the inequality, then the inequality constrains solutions\n # and thus the inequality is non-redundant\n obj = -sol['fun'] - h[k]\n if obj > abs_tol:\n keep_row.append(k)\n elif sol['status'] == 3:\n keep_row.append(k)\n polyOut = Polytope(A_arr[keep_row], b_arr[keep_row])\n polyOut.minrep = True\n return polyOut", "def _projectPointEllipse_(a, b, y0, y1, tol):\r\n if y0>tol and y1>tol:\r\n fObj = lambda t: (a*y0/(t+a**2))**2 + (b*y1/(t+b**2))**2 - 1\r\n tmin = -b**2 + b*y1\r\n tmax = -b**2 + np.sqrt(a**2*y0**2+b**2*y1**2)\r\n tbar = optimize.brentq(fObj, tmin, tmax);\r\n x0 = a**2*y0/(tbar+a**2);\r\n x1 = b**2*y1/(tbar+b**2);\r\n d = np.sign(tbar) * np.sqrt((x0-y0)**2 + (x1-y1)**2)\r\n\r\n elif y1 > tol: #and y0==0\r\n x0 = 0\r\n x1 = b\r\n d = (y1-x1)\r\n\r\n\r\n elif y0 > (a**2-b**2)/a: #and y1==0\r\n x0 = a\r\n x1 = 0\r\n d = (y0-x0)\r\n\r\n else: #if y1==0 and y0<thresh\r\n x0 = a**2*y0/(a**2-b**2)\r\n x1 = b*np.sqrt(1- (x0/a)**2)\r\n d = np.sqrt( (x0-y0)**2 + x1**2)\r\n\r\n return 
d, (x0, x1)", "def reduce(self, threshold):\n def percentage_change(old, new):\n return (old - new) / old\n real_reduction_iterations = 0\n padic_reduction_iterations = 0\n cont_reduction_iterations = 0\n factor = len(self.constants.primes) + 1\n \n print('initial bound',max(self.coefficients['n1_bound'],max(self.coefficients['Z_bounds'])))\n\n # First, go through the real reduction loop.\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = None\n while True:\n real_reduction_iterations += 1\n logging.info(\"Real Reduction - Iteration %d\" % real_reduction_iterations)\n\n large_constant = self.calculate_large_constant(current_n1_bound, factor)\n logging.info(\"Large constant contains %d digits \" % large_constant.ndigits())\n\n # Find a new bound on n_1 - n_k\n new_diff_bound = self.real_reduce(current_n1_bound, large_constant)\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n self.update_real_constants(new_diff_bound)\n logging.info(\"new diff bound: \" + str(new_diff_bound))\n logging.info(\"New bound on n1: \" + str(self.coefficients[\"n1_bound\"]))\n logging.info(\"New bound on zi: \" + str(self.coefficients['Z_bounds']))\n \n if percentage_change(current_n1_bound, self.coefficients[\"n1_bound\"]) < self.threshold:\n logging.info(\"New bound did not improve in the real step; real reduction process is done.\")\n factor = factor + 5\n break\n\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = new_diff_bound\n\n # Second, go through the p-adic reduction loop.\n current_Z_bounds = self.coefficients['Z_bounds']\n while True:\n padic_reduction_iterations += 1\n logging.info(\"p-adic Reduction - Iteration %d\" % padic_reduction_iterations)\n\n new_Z_bounds = self.padic_reduce(math.ceil(current_diff_bound))\n logging.info(\"New bound on zi: \" + str(new_Z_bounds))\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n new_n1_bound = self.update_padic_constants(new_Z_bounds)\n logging.info(\"New bound on n1: \" + str(new_n1_bound))\n if percentage_change(current_n1_bound, new_n1_bound) < self.threshold:\n logging.info(\"New bound did not improve in the p-adic step; p-adic reduction process is done.\")\n break\n\n current_n1_bound = new_n1_bound\n\n print(current_n1_bound)\n\n return self.constants", "def createCubicBezier(self):\n return _libsbml.Curve_createCubicBezier(self)", "def ccw(p1: np.ndarray, p2: np.ndarray, p3: np.ndarray) -> int:\n dx1 = p2[0] - p1[0]\n dy1 = p2[1] - p1[1]\n dx2 = p3[0] - p1[0]\n dy2 = p3[1] - p1[1]\n\n dx1dy2 = dx1 * dy2\n dy1dx2 = dy1 * dx2\n\n if dx1dy2 > dy1dx2:\n return 1\n if dx1dy2 < dy1dx2:\n return -1\n if dx1 * dx2 < 0 or dy1 * dy2 < 0:\n return -1\n if dx1 * dx1 + dy1 * dy1 < dx2 * dx2 + dy2 * dy2:\n return 1\n\n return 0", "def append_point_by_derivative(contour_points, target_index, target_contour):\n target_contour_points = target_contour.points\n distance = 0xFFFFFF\n points_to_append, rate = None, 0\n x_value, y_value = contour_points[target_index].position\n\n try:\n # Calculates gradient by derivative.\n gradient = -1 / calculate_derivative(contour_points, target_index)\n # Line's equation.\n linear_function = lambda x: gradient*x + y_value - (x_value*gradient)\n # Extends 500 up and down from standard point.\n line = bezier.Curve(np.asfortranarray([\n [x_value+500, x_value-500],\n [linear_function(x_value+500), linear_function(x_value-500)]\n ]), degree=1)\n except ZeroDivisionError:\n line = bezier.Curve(np.asfortranarray([\n [x_value, x_value],\n [float(y_value+500), 
float(y_value-500)]\n ]), degree=1)\n\n # Finds what curve in target contour is meeted with line.\n for i, _ in enumerate(target_contour_points):\n if i == target_index and target_contour_points == contour_points:\n continue\n if target_contour_points[i].type != 'offcurve' \\\n and target_contour_points[i-1].type == 'offcurve':\n nodes = np.asfortranarray([\n [float(target_contour_points[i+j].x) for j in range(-3, 1)],\n [float(target_contour_points[i+j].y) for j in range(-3, 1)]\n ])\n curve = bezier.Curve(nodes, degree=3)\n\n # If line meet curve.\n if _is_curve_meet(line, curve):\n meeting_object = curve.evaluate(curve.intersect(line)[0, :][0])\n meeting_point = tuple(meeting_object.flatten())\n new_distance = _calculate_distance( \\\n contour_points[target_index].position, meeting_point)\n # Finds nearest curve.\n if new_distance < distance:\n distance = new_distance\n points_to_append = [target_contour_points[i+j] \\\n for j in range(-3, 1)]\n rate = curve.locate(meeting_object)\n\n # Appends point at target curve.\n if points_to_append and rate:\n appendtools.append_point_rate(target_contour, points_to_append, rate)", "def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;", "def isConvex(data, boundaryPointsDict, triangleDict, approximation ,demo = True):\n\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after 
looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= epsilon:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n if demo:\n plotDemo(data, point, bdrPntIdx)\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True", "def equivalentDiameter(cnt):\n\treturn np.sqrt(4 * (cv2.contourArea(cnt)) / np.pi)", "def detect_and_draw_contours(frame, thresh, meas_last, meas_now, min_area = 0, max_area = 10000, ellipses = False, directors = False):\n # Detect contours and draw them based on specified area thresholds\n img, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n final = frame.copy()\n\n i = 0\n meas_last = meas_now.copy()\n del meas_now[:]\n director = 0. \n rx = ry = 0.\n cx = cy = 0.\n\n fname_min_enc_C = \"min_enc_C.dat\"\n f_min_enc_C = open(fname_min_enc_C,'a+')\n R_min_enc_C = x_min_enc_C = y_min_enc_C = 0.\n \n while i < len(contours):\n area = cv2.contourArea(contours[i])\n if area < min_area or area > max_area:\n del contours[i]\n else:\n\n cv2.drawContours(final, contours, i, (0,0,255), 1)\n # add ellipse here\n if ( ellipses ):\n ellipse = cv2.fitEllipse(contours[i])\n cv2.ellipse(final,ellipse,(0,255,0),2)\n M = cv2.moments(contours[i])\n\n # here is the ouput showing minEnclosingCircle, which should\n # basically give a long-axis measurement of any given ellipse\n (x_min_enc_C, y_min_enc_C), R_min_enc_C = cv2.minEnclosingCircle(contours[i]) \n f_min_enc_C.write(\"%e %e %e\\n\" %(x_min_enc_C,y_min_enc_C,R_min_enc_C))\n\n if M['m00'] != 0:\n cx = M['m10']/M['m00']\n cy = M['m01']/M['m00']\n if ( directors ):\n mu20 = M['m20']/M['m00'] - pow(cx,2)\n mu02 = M['m02']/M['m00'] - pow(cy,2)\n mu11 = M['m11']/M['m00'] - cx*cy\n else:\n \tcx = 0\n \tcy = 0\n\n if ( directors ):\n ry = 2*mu11\n rx = mu20-mu02\n if rx == 0:\n atan = 0.5*np.pi\n if ry < 0: atan *= -1 \n director = np.fmod(0.5*atan,2*np.pi) + np.pi\n else:\n director = np.fmod(0.5*np.arctan(ry/rx),2*np.pi) + np.pi\n if (rx < 0):\n director += np.pi/2.\n\n vsize = 10\n cv2.line(final,\n (int(cx - vsize*np.cos(director)), int(cy - vsize*np.sin(director))),\n (int(cx + vsize*np.cos(director)), int(cy + vsize*np.sin(director))), \n (255,0,0),2)\n meas_now.append([cx,cy,director])\n else: \n meas_now.append([cx,cy])\n\n i += 1\n\n f_min_enc_C.close()\n\n fname_ndist = \"ndist.dat\"\n f_ndist = open(fname_ndist,'a+')\n meas_now = np.array(meas_now)\n for i in range(len(meas_now)):\n for j in range(i+1,len(meas_now)):\n f_ndist.write(\"%e \\n\" % 
distance(meas_now[i,:-1],meas_now[j,:-1]))\n f_ndist.close()\n meas_now = list(meas_now)\n \n return final, contours, meas_last, meas_now", "def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def detectShape(c):\n shape = \"unidentified\"\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.04 * peri, True)\n\n # if the shape is a triangle, it will have 3 vertices\n if len(approx) == 3:\n shape = \"triangle\"\n \n # if the shape has 4 vertices, it is either a square or\n # a rectangle\n elif len(approx) == 4:\n # compute the bounding box of the contour and use the\n # bounding box to compute the aspect ratio\n (x, y, w, h) = cv2.boundingRect(approx)\n ar = w / float(h)\n \n # a square will have an aspect ratio that is approximately\n # equal to one, otherwise, the shape is a rectangle\n shape = \"square\" if (ar == 0.95 and ar <= 1.05) else \"rectangle\"\n \n # if the shape is a pentagon, it will have 5 vertices\n elif len(approx) == 5:\n shape = \"pentagon\"\n \n # otherwise, we assume the shape is a circle\n else:\n shape = \"circle\"\n \n # return the name of the shape\n return shape", "def test_project(self):\n import itertools\n from numpy import array, dot\n from numpy.linalg import det\n\n # our little magic constant\n magic = 0.33377777373737737777\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 1 + magic, -1 - magic):\n \n s = space(curvature=k)\n\n # test line preserving projection\n # 3 points are colinear when\n # | x1 y1 1 |\n # | x2 y2 1 | = 0\n # | x3 y3 1 |\n # let's test this!\n\n for p, q in itertools.permutations((\n (1, 0),\n (3/5, 4/5),\n (-5/13, 12/13),\n (-8/17, -15/17),\n ), 2):\n p = s.make_point(p, magic)\n q = s.make_point(q, magic)\n u = p.project(projection_types.preserve_lines)\n v = (p+q).project(projection_types.preserve_lines)\n w = (p+(-magic)*q).project(projection_types.preserve_lines)\n d = det([[*u, 1],[*v, 1],[*w, 1]])\n self.assertTrue(abs(d) < 1e-9)\n\n # test angle preserving projection\n # map will be conformal, so we do like a secant test\n\n delta = 1e-9\n vi = s.make_point((1, 0, 0), delta)\n vj = s.make_point((0, 1, 0), delta)\n vk = s.make_point((0, 0, 1), delta)\n for p in (\n (1, 0, 0),\n (0, 3/5, 4/5),\n (-5/13, 12/13, 0),\n (2/11, 6/11, 9/11),\n (3/7, 6/7, 2/7)\n ):\n p = s.make_point(p, magic)\n pp = p.project(projection_types.preserve_angles)\n pi, pj, pk = (array((p+v).project(projection_types.preserve_angles)) - pp for v in (vi, vj, vk))\n # should stay orthogonal and same size\n # note that we're doing a secant thing so it's only approximate\n # thus we set a relatively high tolerance\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pj, pj),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pk, pk),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pj),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pk),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pj, pk),\n 0,\n abs_tol = 1e-6\n ))", "def test_2D(self):\n grid_limits = [(-4, 4), (-4, 4)]\n\n A = CArray.eye(2, 2)\n b = CArray.zeros(2).T\n circle = CFunction.create('quadratic', A, b, 0)\n\n self._test_2D(circle, grid_limits, levels=[16])", "def createCubicBezier(self):\n return _libsbml.Layout_createCubicBezier(self)", "def _curvature(self):\n y_eval = self.left_fitx.shape[0] - 10\n left_curverad = (((1 + (2 * self.left_fit[0] * y_eval + 
self.left_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.left_fit[0]))\n right_curverad = (((1 + (2 * self.right_fit[0] * y_eval + self.right_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.right_fit[0]))\n return left_curverad, right_curverad", "def check_approx_decomposition(self, target_unitary, decomposer, num_basis_uses):\n self.assertEqual(decomposer.num_basis_gates(target_unitary), num_basis_uses)\n decomp_circuit = decomposer(target_unitary)\n self.assertEqual(num_basis_uses, decomp_circuit.count_ops().get(\"unitary\", 0))", "def simplify(self, tolerance=1e-3):\n for n, points in enumerate(self.polygons):\n self.polygons[n] = _simplify(points, tolerance=tolerance)\n if self.parent is not None:\n self.parent._bb_valid = False\n return self", "def _computeGC2(rupture, lon, lat, depth):\n\n quadlist = rupture.getQuadrilaterals()\n quadgc2 = copy.deepcopy(quadlist)\n\n oldshape = lon.shape\n\n if len(oldshape) == 2:\n newshape = (oldshape[0] * oldshape[1], 1)\n else:\n newshape = (oldshape[0], 1)\n\n #--------------------------------------------------------------------------\n # Define a projection that spans sites and rupture\n #--------------------------------------------------------------------------\n\n all_lat = np.append(lat, rupture.lats)\n all_lon = np.append(lon, rupture.lons)\n\n west = np.nanmin(all_lon)\n east = np.nanmax(all_lon)\n south = np.nanmin(all_lat)\n north = np.nanmax(all_lat)\n proj = get_orthographic_projection(west, east, north, south)\n\n totweight = np.zeros(newshape, dtype=lon.dtype)\n GC2T = np.zeros(newshape, dtype=lon.dtype)\n GC2U = np.zeros(newshape, dtype=lon.dtype)\n\n #--------------------------------------------------------------------------\n # First sort out strike discordance and nominal strike prior to\n # starting the loop if there is more than one group/trace.\n #--------------------------------------------------------------------------\n group_ind = rupture._getGroupIndex()\n\n # Need group_ind as numpy array for sensible indexing...\n group_ind_np = np.array(group_ind)\n uind = np.unique(group_ind_np)\n n_groups = len(uind)\n\n if n_groups > 1:\n #----------------------------------------------------------------------\n # The first thing we need to worry about is finding the coordinate\n # shift. 
U's origin is \"selected from the two endpoints most\n # distant from each other.\"\n #----------------------------------------------------------------------\n\n # Need to get index of first and last quad\n # for each segment\n iq0 = np.zeros(n_groups, dtype='int16')\n iq1 = np.zeros(n_groups, dtype='int16')\n for k in uind:\n ii = [i for i, j in enumerate(group_ind) if j == uind[k]]\n iq0[k] = int(np.min(ii))\n iq1[k] = int(np.max(ii))\n\n #----------------------------------------------------------------------\n # This is an iterator for each possible combination of traces\n # including trace orientations (i.e., flipped).\n #----------------------------------------------------------------------\n\n it_seg = it.product(it.combinations(uind, 2),\n it.product([0, 1], [0, 1]))\n\n # Placeholder for the trace pair/orientation that gives the\n # largest distance.\n dist_save = 0\n\n for k in it_seg:\n s0ind = k[0][0]\n s1ind = k[0][1]\n p0ind = k[1][0]\n p1ind = k[1][1]\n if p0ind == 0:\n P0 = quadlist[iq0[s0ind]][0]\n else:\n P0 = quadlist[iq1[s0ind]][1]\n if p1ind == 0:\n P1 = quadlist[iq1[s1ind]][0]\n else:\n P1 = quadlist[iq0[s1ind]][1]\n\n dist = geodetic.distance(P0.longitude, P0.latitude, 0.0,\n P1.longitude, P1.latitude, 0.0)\n if dist > dist_save:\n dist_save = dist\n A0 = P0\n A1 = P1\n\n #----------------------------------------------------------------------\n # A0 and A1 are the furthest two segment endpoints, but we still\n # need to sort out which one is the \"origin\".\n #----------------------------------------------------------------------\n\n # This goofy while-loop is to adjust the side of the rupture where the\n # origin is located\n dummy = -1\n while dummy < 0:\n A0.depth = 0\n A1.depth = 0\n p_origin = Vector.fromPoint(A0)\n a0 = Vector.fromPoint(A0)\n a1 = Vector.fromPoint(A1)\n ahat = (a1 - a0).norm()\n\n # Loop over traces\n e_j = np.zeros(n_groups)\n b_prime = [None] * n_groups\n for j in range(n_groups):\n P0 = quadlist[iq0[j]][0]\n P1 = quadlist[iq1[j]][1]\n P0.depth = 0\n P1.depth = 0\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n b_prime[j] = p1 - p0\n e_j[j] = ahat.dot(b_prime[j])\n E = np.sum(e_j)\n\n # List of discordancy\n dc = [np.sign(a) * np.sign(E) for a in e_j]\n b = Vector(0, 0, 0)\n for j in range(n_groups):\n b.x = b.x + b_prime[j].x * dc[j]\n b.y = b.y + b_prime[j].y * dc[j]\n b.z = b.z + b_prime[j].z * dc[j]\n bhat = b.norm()\n dummy = bhat.dot(ahat)\n if dummy < 0:\n tmpA0 = copy.deepcopy(A0)\n tmpA1 = copy.deepcopy(A1)\n A0 = tmpA1\n A1 = tmpA0\n\n #----------------------------------------------------------------------\n # To fix discordancy, need to flip quads and rearrange\n # the order of quadgc2\n #----------------------------------------------------------------------\n\n # 1) flip quads\n for i in range(len(quadgc2)):\n if dc[group_ind[i]] < 0:\n quadgc2[i] = reverse_quad(quadgc2[i])\n\n # 2) rearrange quadlist order\n qind = np.arange(len(quadgc2))\n for i in range(n_groups):\n qsel = qind[group_ind_np == uind[i]]\n if dc[i] < 0:\n qrev = qsel[::-1]\n qind[group_ind_np == uind[i]] = qrev\n\n quadgc2old = copy.deepcopy(quadgc2)\n for i in range(len(qind)):\n quadgc2[i] = quadgc2old[qind[i]]\n\n # End of if-statement for adjusting group discordancy\n\n s_i = 0.0\n l_i = np.zeros(len(quadgc2))\n\n for i in range(len(quadgc2)):\n G0, G1, G2, G3 = quadgc2[i]\n\n # Compute u_i and t_i for this quad\n t_i = __calc_t_i(G0, G1, lat, lon, proj)\n u_i = __calc_u_i(G0, G1, lat, lon, proj)\n\n # Quad length (top edge)\n l_i[i] = 
get_quad_length(quadgc2[i])\n\n #----------------------------------------------------------------------\n # Weight of segment, three cases\n #----------------------------------------------------------------------\n\n # Case 3: t_i == 0 and 0 <= u_i <= l_i\n w_i = np.zeros_like(t_i)\n\n # Case 1:\n ix = t_i != 0\n w_i[ix] = (1.0 / t_i[ix]) * (np.arctan((l_i[i] -\n u_i[ix]) / t_i[ix]) - np.arctan(-u_i[ix] / t_i[ix]))\n\n # Case 2:\n ix = (t_i == 0) & ((u_i < 0) | (u_i > l_i[i]))\n w_i[ix] = 1 / (u_i[ix] - l_i[i]) - 1 / u_i[ix]\n\n totweight = totweight + w_i\n GC2T = GC2T + w_i * t_i\n\n if n_groups == 1:\n GC2U = GC2U + w_i * (u_i + s_i)\n else:\n if i == 0:\n qind = np.array(range(len(quadgc2)))\n l_kj = 0\n s_ij_1 = 0\n else:\n l_kj = l_i[(group_ind_np == group_ind_np[i]) & (qind < i)]\n s_ij_1 = np.sum(l_kj)\n\n # First endpoint in the current 'group' (or 'trace' in GC2 terms)\n p1 = Vector.fromPoint(quadgc2[iq0[group_ind[i]]][0])\n s_ij_2 = (p1 - p_origin).dot(np.sign(E) * ahat) / 1000.0\n\n # Above is GC2N, for GC2T use:\n #s_ij_2 = (p1 - p_origin).dot(bhat) / 1000.0\n\n s_ij = s_ij_1 + s_ij_2\n GC2U = GC2U + w_i * (u_i + s_ij)\n\n s_i = s_i + l_i[i]\n\n GC2T = GC2T / totweight\n GC2U = GC2U / totweight\n\n # Dictionary for holding the distances\n distdict = dict()\n\n distdict['T'] = copy.deepcopy(GC2T).reshape(oldshape)\n distdict['U'] = copy.deepcopy(GC2U).reshape(oldshape)\n\n # Take care of Rx\n Rx = copy.deepcopy(GC2T) # preserve sign (no absolute value)\n Rx = Rx.reshape(oldshape)\n distdict['rx'] = Rx\n\n # Ry\n Ry = GC2U - s_i / 2.0\n Ry = Ry.reshape(oldshape)\n distdict['ry'] = Ry\n\n # Ry0\n Ry0 = np.zeros_like(GC2U)\n ix = GC2U < 0\n Ry0[ix] = np.abs(GC2U[ix])\n if n_groups > 1:\n s_i = s_ij + l_i[-1]\n ix = GC2U > s_i\n Ry0[ix] = GC2U[ix] - s_i\n Ry0 = Ry0.reshape(oldshape)\n distdict['ry0'] = Ry0\n\n return distdict", "def find_concave_outline(shape_list: List[List[Tuple[float, float]]]):\n # Find the most lower-right point\n current_shape = shape_list[0]\n current_pt = current_shape[0]\n test_idx = 1\n next_test_dir = 1\n for s in shape_list:\n for i in range(len(s)):\n p = s[i]\n if ((p[0] < current_pt[0]) or\n (p[0] == current_pt[0] and p[1] < current_pt[1])):\n # Replace\n current_pt = p\n current_shape = s\n test_idx = (i+1) % len(s)\n next_test_dir = 1\n vertex_list = [current_pt]\n # Keep going until you reach back to the first point\n while not _point_equal(current_shape[test_idx], vertex_list[0]):\n # Iterate through all the shapes to try to find a matching edge\n checking = True\n for s in (s for s in shape_list if not _arr_eq(s, current_shape)):\n if checking: # Way to break out if match found\n for i in range(len(s)):\n spt = s[i]\n if _point_equal(current_pt, spt):\n spt_after = s[(i+1) % len(s)]\n spt_before = s[(i-1) % len(s)]\n test_pt = current_shape[test_idx]\n if _point_equal(test_pt, spt_after):\n test_idx = (i-1) % len(s)\n next_test_dir = -1\n current_shape = s\n checking = False\n elif _point_equal(test_pt, spt_before):\n test_idx = (i+1) % len(s)\n next_test_dir = 1\n current_shape = s\n checking = False\n # Have you exhausted all shapes?\n if checking:\n current_pt = current_shape[test_idx]\n vertex_list.append(current_pt)\n test_idx += next_test_dir\n test_idx %= len(current_shape)\n return vertex_list", "def getpviolcon(self,whichsol_,sub_,viol_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be 
None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _viol_minlength = (num_)\n if (num_) > 0 and viol_ is not None and len(viol_) != (num_):\n raise ValueError(\"Array argument viol is not long enough: Is %d, expected %d\" % (len(viol_),(num_)))\n if isinstance(viol_,numpy.ndarray) and not viol_.flags.writeable:\n raise ValueError(\"Argument viol must be writable\")\n if viol_ is None:\n raise ValueError(\"Argument viol may not be None\")\n if isinstance(viol_, numpy.ndarray) and viol_.dtype is numpy.dtype(numpy.float64) and viol_.flags.contiguous:\n _viol_copyarray = False\n _viol_tmp = ctypes.cast(viol_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif viol_ is not None:\n _viol_copyarray = True\n _viol_np_tmp = numpy.zeros(len(viol_),numpy.dtype(numpy.float64))\n _viol_np_tmp[:] = viol_\n assert _viol_np_tmp.flags.contiguous\n _viol_tmp = ctypes.cast(_viol_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _viol_copyarray = False\n _viol_tmp = None\n \n res = __library__.MSK_XX_getpviolcon(self.__nativep,whichsol_,num_,_sub_tmp,_viol_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _viol_copyarray:\n viol_[:] = _viol_np_tmp", "def points2contour(points):\n return points.reshape(-1, 1, 2)", "def smooth_contour(self, radius, max_diff, points_number, f_tot):\n min_angle = radius.argmin()\n istart = min_angle\n\n xmins2 = np.copy(radius)\n xmaxs = np.copy(radius)\n\n changed = True\n\n def cut_rotate(start, step):\n last_ok = False\n iteration = 0\n any_cut = False\n while not (iteration >= points_number and last_ok):\n current = (start + iteration * step) % points_number\n previous = (current - 1 * step) % points_number\n\n if xmins2[current] - xmins2[previous] > max_diff[xmins2[previous]]:\n xmaxs[current] = xmins2[previous] + max_diff[xmins2[previous]]\n f_tot_slice = f_tot[current, :xmaxs[current] + 1]\n xmins2[current] = f_tot_slice.argmin()\n if step < 0:\n xmins2[current] = max(xmins2[current], xmins2[previous] - max_diff[xmins2[previous]])\n last_ok = False\n any_cut = True\n else:\n last_ok = True\n\n iteration += 1\n return (start + iteration * step) % points_number, any_cut\n\n current_position = istart\n while changed:\n changed = False\n\n current_position, any_cut = cut_rotate(current_position, 1)\n changed = changed or any_cut\n\n current_position, any_cut = cut_rotate(current_position, -1)\n changed = changed or any_cut\n\n return xmins2, xmaxs", "def _computeGC2(rupture, lon, lat, depth):\n\n quadlist = rupture.getQuadrilaterals()\n quadgc2 = copy.deepcopy(quadlist)\n\n oldshape = lon.shape\n\n if len(oldshape) == 2:\n newshape = (oldshape[0] * oldshape[1], 1)\n else:\n newshape = (oldshape[0], 1)\n\n # -------------------------------------------------------------------------\n # Define a projection that spans sites and rupture\n # -------------------------------------------------------------------------\n\n all_lat = np.append(lat, 
rupture.lats)\n all_lon = np.append(lon, rupture.lons)\n\n west = np.nanmin(all_lon)\n east = np.nanmax(all_lon)\n south = np.nanmin(all_lat)\n north = np.nanmax(all_lat)\n proj = OrthographicProjection(west, east, north, south)\n\n totweight = np.zeros(newshape, dtype=lon.dtype)\n GC2T = np.zeros(newshape, dtype=lon.dtype)\n GC2U = np.zeros(newshape, dtype=lon.dtype)\n\n # -------------------------------------------------------------------------\n # First sort out strike discordance and nominal strike prior to\n # starting the loop if there is more than one group/trace.\n # -------------------------------------------------------------------------\n group_ind = rupture._getGroupIndex()\n\n # Need group_ind as numpy array for sensible indexing...\n group_ind_np = np.array(group_ind)\n uind = np.unique(group_ind_np)\n n_groups = len(uind)\n\n if n_groups > 1:\n # ---------------------------------------------------------------------\n # The first thing we need to worry about is finding the coordinate\n # shift. U's origin is \"selected from the two endpoints most\n # distant from each other.\"\n # ---------------------------------------------------------------------\n\n # Need to get index of first and last quad\n # for each segment\n iq0 = np.zeros(n_groups, dtype='int16')\n iq1 = np.zeros(n_groups, dtype='int16')\n for k in uind:\n ii = [i for i, j in enumerate(group_ind) if j == uind[k]]\n iq0[k] = int(np.min(ii))\n iq1[k] = int(np.max(ii))\n\n # ---------------------------------------------------------------------\n # This is an iterator for each possible combination of traces\n # including trace orientations (i.e., flipped).\n # ---------------------------------------------------------------------\n\n it_seg = it.product(it.combinations(uind, 2),\n it.product([0, 1], [0, 1]))\n\n # Placeholder for the trace pair/orientation that gives the\n # largest distance.\n dist_save = 0\n\n for k in it_seg:\n s0ind = k[0][0]\n s1ind = k[0][1]\n p0ind = k[1][0]\n p1ind = k[1][1]\n if p0ind == 0:\n P0 = quadlist[iq0[s0ind]][0]\n else:\n P0 = quadlist[iq1[s0ind]][1]\n if p1ind == 0:\n P1 = quadlist[iq1[s1ind]][0]\n else:\n P1 = quadlist[iq0[s1ind]][1]\n\n dist = geodetic.distance(P0.longitude, P0.latitude, 0.0,\n P1.longitude, P1.latitude, 0.0)\n if dist > dist_save:\n dist_save = dist\n A0 = P0\n A1 = P1\n\n # ---------------------------------------------------------------------\n # A0 and A1 are the furthest two segment endpoints, but we still\n # need to sort out which one is the \"origin\".\n # ---------------------------------------------------------------------\n\n # This goofy while-loop is to adjust the side of the rupture where the\n # origin is located\n dummy = -1\n while dummy < 0:\n A0.depth = 0\n A1.depth = 0\n p_origin = Vector.fromPoint(A0)\n a0 = Vector.fromPoint(A0)\n a1 = Vector.fromPoint(A1)\n ahat = (a1 - a0).norm()\n\n # Loop over traces\n e_j = np.zeros(n_groups)\n b_prime = [None] * n_groups\n for j in range(n_groups):\n P0 = quadlist[iq0[j]][0]\n P1 = quadlist[iq1[j]][1]\n P0.depth = 0\n P1.depth = 0\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n b_prime[j] = p1 - p0\n e_j[j] = ahat.dot(b_prime[j])\n E = np.sum(e_j)\n\n # List of discordancy\n dc = [np.sign(a) * np.sign(E) for a in e_j]\n b = Vector(0, 0, 0)\n for j in range(n_groups):\n b.x = b.x + b_prime[j].x * dc[j]\n b.y = b.y + b_prime[j].y * dc[j]\n b.z = b.z + b_prime[j].z * dc[j]\n bhat = b.norm()\n dummy = bhat.dot(ahat)\n if dummy < 0:\n tmpA0 = copy.deepcopy(A0)\n A0 = copy.deepcopy(A1)\n A1 = tmpA0\n\n 
# ---------------------------------------------------------------------\n # To fix discordancy, need to flip quads and rearrange\n # the order of quadgc2\n # ---------------------------------------------------------------------\n\n # 1) flip quads\n for i in range(len(quadgc2)):\n if dc[group_ind[i]] < 0:\n quadgc2[i] = reverse_quad(quadgc2[i])\n\n # 2) rearrange quadlist order\n qind = np.arange(len(quadgc2))\n for i in range(n_groups):\n qsel = qind[group_ind_np == uind[i]]\n if dc[i] < 0:\n qrev = qsel[::-1]\n qind[group_ind_np == uind[i]] = qrev\n\n quadgc2old = copy.deepcopy(quadgc2)\n for i in range(len(qind)):\n quadgc2[i] = quadgc2old[qind[i]]\n\n # End of if-statement for adjusting group discordancy\n\n s_i = 0.0\n l_i = np.zeros(len(quadgc2))\n\n for i in range(len(quadgc2)):\n G0, G1, G2, G3 = quadgc2[i]\n\n # Compute u_i and t_i for this quad\n t_i = __calc_t_i(G0, G1, lat, lon, proj)\n u_i = __calc_u_i(G0, G1, lat, lon, proj)\n\n # Quad length (top edge)\n l_i[i] = get_quad_length(quadgc2[i])\n\n # ---------------------------------------------------------------------\n # Weight of segment, three cases\n # ---------------------------------------------------------------------\n\n # Case 3: t_i == 0 and 0 <= u_i <= l_i\n w_i = np.zeros_like(t_i)\n\n # To avoid division by zero in totweight later on:\n ix = (t_i == 0) & (0 <= u_i) & (u_i <= l_i[i])\n totweight[ix] = 1.0\n\n # Case 1:\n ix = t_i != 0\n w_i[ix] = (1.0 / t_i[ix]) * (np.arctan(\n (l_i[i] - u_i[ix]) / t_i[ix]) - np.arctan(-u_i[ix] / t_i[ix]))\n\n # Case 2:\n ix = (t_i == 0) & ((u_i < 0) | (u_i > l_i[i]))\n w_i[ix] = 1 / (u_i[ix] - l_i[i]) - 1 / u_i[ix]\n\n totweight = totweight + w_i\n GC2T = GC2T + w_i * t_i\n\n if n_groups == 1:\n GC2U = GC2U + w_i * (u_i + s_i)\n else:\n if i == 0:\n qind = np.array(range(len(quadgc2)))\n l_kj = 0\n s_ij_1 = 0\n else:\n l_kj = l_i[(group_ind_np == group_ind_np[i]) & (qind < i)]\n s_ij_1 = np.sum(l_kj)\n\n # First endpoint in the current 'group' (or 'trace' in GC2 terms)\n p1 = Vector.fromPoint(quadgc2[iq0[group_ind[i]]][0])\n s_ij_2 = (p1 - p_origin).dot(np.sign(E) * ahat) / 1000.0\n\n # Above is GC2N, for GC2T use:\n # s_ij_2 = (p1 - p_origin).dot(bhat) / 1000.0\n\n s_ij = s_ij_1 + s_ij_2\n GC2U = GC2U + w_i * (u_i + s_ij)\n\n s_i = s_i + l_i[i]\n\n GC2T = GC2T / totweight\n GC2U = GC2U / totweight\n\n # Dictionary for holding the distances\n distdict = dict()\n\n distdict['T'] = copy.deepcopy(GC2T).reshape(oldshape)\n distdict['U'] = copy.deepcopy(GC2U).reshape(oldshape)\n\n # Take care of Rx\n Rx = copy.deepcopy(GC2T) # preserve sign (no absolute value)\n Rx = Rx.reshape(oldshape)\n distdict['rx'] = Rx\n\n # Ry\n Ry = GC2U - s_i / 2.0\n Ry = Ry.reshape(oldshape)\n distdict['ry'] = Ry\n\n # Ry0\n Ry0 = np.zeros_like(GC2U)\n ix = GC2U < 0\n Ry0[ix] = np.abs(GC2U[ix])\n if n_groups > 1:\n s_i = s_ij + l_i[-1]\n ix = GC2U > s_i\n Ry0[ix] = GC2U[ix] - s_i\n Ry0 = Ry0.reshape(oldshape)\n distdict['ry0'] = Ry0\n\n return distdict", "def line_contribution(p1,p2,alpha = 1):\n\n adjust = np.zeros((worksize,worksize,2))\n\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n\n coordinates = coordinate_matrix(worksize)\n numerator = np.sum(np.multiply(coordinates,np.reshape(np.array(((y2-y1,-(x2-x1)))),(2,1,1))),axis = 0) + x2*y1 - y2*x1\n dist_from_line = np.abs(numerator) * (1.0/np.sqrt((y2-y1)**2+(x2-x1)**2))\n xcontribution = (x2-x1)*(1/(alpha*dist_from_line+1))\n ycontribution = (y2-y1)*(1/(alpha*dist_from_line+1))\n\n\n return 
np.array((-ycontribution,xcontribution))/np.sqrt((y2-y1)**2+(x2-x1)**2)", "def extreme(poly1):\n if poly1.vertices is not None:\n # In case vertices already stored\n return poly1.vertices\n V = np.array([])\n R = np.array([])\n if isinstance(poly1, Region):\n raise Exception(\"extreme: not executable for regions\")\n # `poly1` is a `Polytope`\n poly1 = reduce(poly1) # Need to have polytope non-redundant!\n if not is_fulldim(poly1):\n return None\n # `poly1` isn't flat\n A = poly1.A.copy()\n b = poly1.b.copy()\n sh = np.shape(A)\n nc = sh[0]\n nx = sh[1]\n # distinguish cases by dimension\n if nx == 1:\n # Polytope is a 1-dim line\n for ii in range(nc):\n V = np.append(V, b[ii] / A[ii])\n if len(A) == 1:\n R = np.append(R, 1)\n raise Exception(\"extreme: polytope is unbounded\")\n elif nx == 2:\n # Polytope is 2D\n alf = np.angle(A[:, 0] + 1j * A[:, 1])\n I = np.argsort(alf)\n H = np.vstack([A, A[0, :]])\n K = np.hstack([b, b[0]])\n I = np.hstack([I, I[0]])\n for ii in range(nc):\n HH = np.vstack([H[I[ii], :], H[I[ii + 1], :]])\n KK = np.hstack([K[I[ii]], K[I[ii + 1]]])\n if np.linalg.cond(HH) == np.inf:\n R = np.append(R, 1)\n raise Exception(\"extreme: polytope is unbounded\")\n else:\n try:\n v = np.linalg.solve(HH, KK)\n except Exception:\n msg = 'Finding extreme points failed, '\n msg += 'Check if any unbounded Polytope '\n msg += 'is causing this.'\n raise Exception(msg)\n if len(V) == 0:\n V = np.append(V, v)\n else:\n V = np.vstack([V, v])\n else:\n # General nD method,\n # solve a vertex enumeration problem for\n # the dual polytope\n rmid, xmid = cheby_ball(poly1)\n A = poly1.A.copy()\n b = poly1.b.copy()\n sh = np.shape(A)\n Ai = np.zeros(sh)\n for ii in range(sh[0]):\n Ai[ii, :] = A[ii, :] / (b[ii] - np.dot(A[ii, :], xmid))\n Q = reduce(qhull(Ai))\n if not is_fulldim(Q):\n return None\n # `Q` isn't flat\n H = Q.A\n K = Q.b\n sh = np.shape(H)\n nx = sh[1]\n V = np.zeros(sh)\n for iv in range(sh[0]):\n for ix in range(nx):\n V[iv, ix] = H[iv, ix] / K[iv] + xmid[ix]\n a = V.size / nx\n if not a.is_integer():\n raise AssertionError(a)\n a = int(a)\n poly1.vertices = V.reshape((a, nx))\n return poly1.vertices" ]
[ "0.6073888", "0.5673097", "0.5591408", "0.5486369", "0.5361581", "0.53499943", "0.5272163", "0.52631676", "0.5168495", "0.51550204", "0.5142952", "0.51347584", "0.51139116", "0.5096012", "0.5069017", "0.50188994", "0.5007644", "0.5004071", "0.50028485", "0.49921945", "0.49831757", "0.49590698", "0.49588722", "0.49582735", "0.4947933", "0.487605", "0.48688474", "0.4868501", "0.48627523", "0.48605618", "0.48031333", "0.4797058", "0.47956073", "0.4788485", "0.47870213", "0.47815686", "0.47811744", "0.47791183", "0.47736126", "0.47641703", "0.47637057", "0.47629726", "0.47582105", "0.4754035", "0.4749097", "0.4748317", "0.4746762", "0.47418836", "0.47395498", "0.47365648", "0.47276533", "0.47130123", "0.47025287", "0.46955192", "0.46925482", "0.46901432", "0.46841642", "0.4683476", "0.4681456", "0.46768612", "0.46692744", "0.4665085", "0.46608427", "0.46608374", "0.46567172", "0.465432", "0.4634909", "0.4625043", "0.46216705", "0.46166608", "0.4614781", "0.4614156", "0.46039575", "0.46039176", "0.46025518", "0.459988", "0.4597091", "0.45913112", "0.4591185", "0.45799148", "0.4577729", "0.45760435", "0.45701107", "0.45574284", "0.45539674", "0.45475563", "0.45435074", "0.45392644", "0.4538051", "0.45361075", "0.45324138", "0.45298174", "0.45278132", "0.45243156", "0.452056", "0.45162612", "0.4514161", "0.4514055", "0.45134148", "0.45111838" ]
0.6092272
0
Calculate the contour area by the function cv2.contourArea() or from moments, M["m00"].
def __CalculateArea(self, contour): return cv2.contourArea(contour)
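Both methods named in the query give the same number; the following is a minimal sketch, assuming OpenCV (cv2) and NumPy are installed, with a made-up sample contour and illustrative variable names (none of them come from this record).

import cv2
import numpy as np

# A 10x10 square contour in the (N, 1, 2) int32 layout that
# cv2.findContours() returns; the points are invented for the demo.
cnt = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)

area_direct = cv2.contourArea(cnt)     # polygon area via Green's formula: 100.0
area_moment = cv2.moments(cnt)["m00"]  # zeroth spatial moment, also the area: 100.0

print(area_direct, area_moment)

cv2.moments() evaluates a contour's spatial moments with Green's formula, so M["m00"] works out to the same polygon area that cv2.contourArea() returns.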
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(cnt):\n\treturn cv2.contourArea(cnt)", "def get_contour_area(contour):\n assert isinstance(contour, np.ndarray), 'contour should be a numpy array'\n return cv2.contourArea(contour)", "def __CalculateMoments(self, contour):\r\n return cv2.moments(contour)", "def get_max_area(contours):\n max_area = 0\n for c in contours:\n temp = cv2.contourArea(c)\n if temp > max_area:\n max_area = temp\n\n return max_area", "def calcZmArea(self):\n #-- NO EXTRAPOLATION\n if self.extrapolation == \"none\":\n self.ZmArea = sum(self.zmareas)\n #-- AREA EXTRAPOLATION\n if self.extrapolation == \"area\":\n self.ZmArea = sum(self.zmareas) * self.stratum.A2 / self.stratum.Aij\n #-- LINEAR EXTRAPOLATION\n if self.extrapolation == \"linear\":\n self.ZmArea = self.stratum.LT / self.stratum.LN * self.meanZmArea() * self.stratum.Ni\n return self.ZmArea", "def calcZmAreaVar(self):\n #-- NO EXTRAPOLATION\n if self.extrapolation == \"none\":\n self.ZmAreaVar = sum(self.zmvars)\n #-- AREA EXTRAPOLATION\n if self.extrapolation == \"area\":\n self.ZmAreaVar = 0\n #-- LINEAR EXTRAPOLATION\n if self.extrapolation == \"linear\":\n self.ZmAreaVar = ( (self.stratum.LT / self.stratum.LN) ** 2 ) * (((self.stratum.Ni ** 2) * (1 - self.ni / self.stratum.Ni) * self.variance()) / self.ni) + ((self.stratum.Ni / self.ni) * sum(self.zmvars))\n return self.ZmAreaVar", "def max_contour(contours):\n if len(contours) == 0:\n return []\n else:\n max_cnt = []\n max_area = 0\n for cnt in contours:\n area = cv2.contourArea(cnt)\n # print(area)\n if area > 1000 and area > max_area:\n max_area = area\n max_cnt = cnt\n return max_cnt", "def center_of_contour(contorno):\n M = cv2.moments(contorno)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"]!=0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (int(cX), int(cY))\n else:\n return (200,150)", "def get_area_box(contours_points):\n rect = cv2.minAreaRect(np.array(contours_points))\n box = cv2.cv.BoxPoints(rect)\n box = np.array(box)\n return map(tuple, box)", "def filter_area( contours, debug=False ):\r\n ret = []\r\n\r\n for x in contours:\r\n area = cv2.contourArea( x )\r\n if area > MIN_AREA and area < MAX_AREA:\r\n if debug:\r\n print \"Area\", area\r\n ret.append( x )\r\n return( ret )", "def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid", "def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)", "def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)", "def calculate_area(surfname,fwhm):\n try:\n subprocess.call(\"depth_potential -area_voronoi \" + surfname + \" /tmp/tmp_area.txt\",shell=True)\n subprocess.call(\"depth_potential -smooth \" + str(fwhm) + \" /tmp/tmp_area.txt \" + surfname + \" /tmp/sm_area.txt\",shell=True)\n area=np.loadtxt(\"/tmp/sm_area.txt\")\n subprocess.call(\"rm /tmp/sm_area.txt /tmp/tmp_area.txt\",shell=True)\n except OSError:\n print(\"depth_potential not found, please install CIVET tools or replace with alternative area calculation/data smoothing\")\n return 0;\n return area;", "def moments(cnt):\n\treturn cv2.moments(cnt)", "def moments(cnt):\n\treturn cv2.moments(cnt)", 
"def convex_hull_area( contours, debug= False ):\r\n ret_areas = []\r\n ret_hulls = []\r\n for c in contours:\r\n hull = cv2.convexHull( c )\r\n area = cv2.contourArea( hull )\r\n ret_areas.append( area )\r\n ret_hulls.append( hull )\r\n if( debug ):\r\n print( \"Hull area: {0}\".format( area ) )\r\n\r\n return ( ret_areas, ret_hulls )", "def update_contour():\n global contour_center\n global contour_area\n\n image = rc.camera.get_color_image()\n\n if image is None:\n contour_center = None\n contour_area = 0\n else:\n # Find all of the orange contours\n contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])\n\n # Select the largest contour\n contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)\n\n if contour is not None:\n # Calculate contour information\n contour_center = rc_utils.get_contour_center(contour)\n contour_area = rc_utils.get_contour_area(contour)\n\n # Draw contour onto the image\n rc_utils.draw_contour(image, contour)\n rc_utils.draw_circle(image, contour_center)\n\n else:\n contour_center = None\n contour_area = 0\n\n # Display the image to the screen\n rc.display.show_color_image(image)", "def __CalculatePerimeter(self, curve):\r\n return cv2.arcLength(curve, True)", "def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width", "def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)", "def get_image_moments(image=None, contour=None, threshold=3):\n\tif contour is None and image is not None:\n\t\tcontour = get_contour(image, threshold)\n\treturn cv2.moments(contour)", "def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)", "def __CalculateExtend(self, contour):\r\n area = self.__CalculateArea(contour)\r\n boundingBox = self.__CalculateBoundingBox(contour)\r\n return area / (boundingBox[2] * boundingBox[3])", "def __CalculateBoundingBox(self, contour):\r\n return cv2.boundingRect(contour)", "def get_dimensions_from_contour(img, cntr, kernel):\n\tmask = np.zeros_like(img) # mask will contain the fitted and adjusted ellipse of a single obstacle\n\tellipse = cv2.fitEllipse(cntr)\n\tx, y, obj_length, obj_height = cv2.boundingRect(cntr)\n\trect = cv2.minAreaRect(cntr)\n\n\tequi_diameter = obj_length # bounding rectangle gives a better approximation of diameter\n\n\tbox = cv2.boxPoints(rect)\n\tbox = np.int0(box)\n\tmask = cv2.ellipse(mask, ellipse, (255, 255, 255), -1) # draw the fitted ellipse\n\trows = mask.shape[0]\n\tcols = mask.shape[1]\n\tM = np.float32([[1, 0, 0], [0, 1, equi_diameter / 4]]) # shift mask down to match obstacle, not edge\n\tmask = cv2.warpAffine(mask, M, (cols, rows))\n\tmask = cv2.erode(mask, kernel, iterations=3) # erode the mask to remove background points\n\treturn mask, box, x, y, obj_length, obj_height", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = int((X*data).sum()/total)\n y = int((Y*data).sum()/total)\n col = data[:, int(y)]\n \n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n \n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return(height, x, y, 
width_x, width_y, 0.0)\n #return(1, 15, 15, 2, 2, 0.0)", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)", "def compute_mesh_area_numpy(mesh):\n pass", "def get_contour_centroid(contour):\n M = cv2.moments(contour)\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n return (cx, cy)", "def find_max_contour(contours):\n if len(contours)==0:\n return []\n max_contour_area = cv2.contourArea(contours[0])\n max_contour = contours[0]\n max_index = 0\n for i, cnt in enumerate(contours):\n area = cv2.contourArea(cnt)\n if area>max_contour_area:\n max_contour=cnt\n max_contour_area=area\n max_index = i\n epsilon = 0.02*cv2.arcLength(max_contour,True)\n max_contour = cv2.approxPolyDP(max_contour,epsilon,True)\n area = cv2.contourArea(max_contour)\n if area < 600:\n return []\n return max_contour", "def extract_area(data,box):\n if box is None or box[0] is None or box[1] is None or box[1][0] - box[0][0] == 0 or box[1][1] - box[0][1] == 0:\n box = ((0,0),(10,10));\n area = ut.extract_area(data['frame'],*box,data['uc'],256);\n return area;", "def area(boxes):\n y_min, x_min, y_max, x_max = np.split(boxes, 4, axis=-1)\n return np.squeeze((y_max - y_min) * (x_max - x_min), [1])", "def transform_contour(contour: np.ndarray, M):\n return cv2.transform(contour[:, np.newaxis], m=M)[:, 0, :2]", "def moments(data):\n total = data.sum()\n if total != 0.:\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n else:\n height=0\n x=0\n y=0\n width_x=0\n width_y=0\n return height,np.sqrt(width_x**2 + width_y**2)", "def compute_mesh_area(mesh):\n vertices = mesh.vertices\n faces = mesh.faces\n areas = [compute_triangle_area(vertices[face]) for face in faces]\n mesh_surface_area = sum(areas)\n return mesh_surface_area", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def area(self) -> 
npt.NDArray[np.float_]:\n return np.sum(self.faces.area)", "def find_center_mass(contour):\n M = cv2.moments(contour)\n if M[\"m00\"] == 0:\n (x, y), _ = cv2.minEnclosingCircle(contour)\n cR = int(y)\n cC = int(x)\n # raise ValueError(\"Contour too small to find a new center.\")\n else:\n cR = int(M[\"m01\"] / M[\"m00\"])\n cC = int(M[\"m10\"] / M[\"m00\"])\n return (cR, cC)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def moments(self, data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return x, y, width_x, width_y, height", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def compute_area(boxes):\n area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n return area", "def get_max_area_contour(contour_list):\n contour_areas = np.array([cv2.contourArea(c) for c in contour_list])\n max_area = contour_areas.max()\n max_ind = contour_areas.argmax()\n max_contour = contour_list[max_ind]\n return max_contour, max_area", "def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]", "def compute_area(boxes: Type[Union[Tensor, np.ndarray]]):\n if isinstance(boxes, Tensor):\n return compute_area_pt(boxes)\n return ((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]))", "def __CalculateRotatedBox(self, contour):\r\n rectangle = cv2.minAreaRect(contour)\r\n box = cv2.boxPoints(rectangle)\r\n return np.int0(box)", "def area_stats(self):\r\n x0 = self.ui.x0.value()\r\n x1 = self.ui.x1.value()\r\n y0 = self.ui.y0.value()\r\n y1 = self.ui.y1.value()\r\n \r\n channel = self.ui.channel_selection.itemData(self.ui.channel_selection.currentIndex())\r\n\r\n avg = np.average(self.npImg[y0:y1,x0:x1,channel])\r\n std = np.std(self.npImg[y0:y1,x0:x1,channel])\r\n minimum = np.min(self.npImg[y0:y1,x0:x1,channel])\r\n maximum = np.max(self.npImg[y0:y1,x0:x1,channel])\r\n \r\n n = (max(x1,x0)-min(x1,x0))*(max(y1,y0)-min(y1,y0))\r\n\r\n \r\n logging.info(\"### Statistics for area x: {:d} - {:d}; y: {:d} - {:d} ###\".format(x0,x1,y0,y1))\r\n logging.info(\"channel: {!s}\".format(self.ui.channel_selection.currentText()))\r\n logging.info(\"average: {:.3f} +- {:.3f}\".format(avg,std/np.sqrt(n)))\r\n logging.info(\"standard deviation: {:.3f}\".format(std))\r\n logging.info(\"maximum: {:d}\".format(maximum)) \r\n logging.info(\"minimum: {:d}\".format(minimum))\r\n logging.info(\"--------------------------------------------------------------\")", "def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])", "def moments(data):\n\n data = np.absolute(data)\n total = data.sum()\n X = np.indices(data.shape)\n x = (X*data).sum()/total\n width = np.sqrt((((X-x)**2)*data).sum()/data.sum())\n m_max = data.max()\n m_min = data.min()\n if np.absolute(m_max) >= np.absolute(m_min):\n height = m_max\n else:\n height = m_min\n return height, x, width", "def 
meanZmArea(self):\n sumArea = 0\n for site in self.sites:\n sumArea = sumArea + site.siteZmArea\n meanArea = sumArea / self.countSites()\n return meanArea", "def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area", "def getContours(img):\n # mode: gets only exrteme outer contours\n # method: stores all contour points\n contours, hierarchy = cv2.findContours(img, mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_NONE)\n \n x, y, w, h = 0, 0, 0, 0\n\n # improving accuracy of contours\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 500:\n # assumes cnt with area > 500 is closed contour \n perimeter = cv2.arcLength(cnt, True)\n\n # approximate polygonal curve of cnt\n # set max difference between original and approxCurve as \n # 0.02 * perimeter\n approxCurve = cv2.approxPolyDP(cnt, 0.02 * perimeter, closed=True)\n \n x, y, w, h = cv2.boundingRect(approxCurve)\n # wand point is is center-left tip\n return x + (w // 2), y", "def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)", "def get_centroid(moments):\n if moments['m00'] > 0:\n centroid_x = moments['m10']/moments['m00']\n centroid_y = moments['m01']/moments['m00']\n else:\n centroid_x = 0.0\n centroid_y = 0.0\n return centroid_x, centroid_y", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-x)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-y)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def solidity(cnt):\n\tarea = cv2.contourArea(cnt)\n\thull = cv2.convexHull(cnt)\n\thull_area = cv2.contourArea(hull)\n\treturn float(area) / hull_area", "def getArea(self):\r\n return np.sum(self.array[:])", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def extent(cnt):\n\tarea = cv2.contourArea(cnt)\n\tx, y, w, h = cv2.boundingRect(cnt)\n\trect_area = w*h\n\treturn float(area)/rect_area", "def find_center( contours ):\r\n ret = []\r\n\r\n for x in contours:\r\n M = cv2.moments( x )\r\n pt = Point()\r\n pt.x = int( M['m10']/M['m00'] )\r\n pt.y = int( M['m01']/M['m00'] )\r\n\r\n ret.append( pt )\r\n\r\n return( ret );", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def area(boxes: Union[np.array, torch.Tensor]) -> Union[np.array, torch.Tensor]:\n return (boxes[:, 2] - 
boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def area_proportion(error_matrix, wh):\r\n\r\n return np.nansum(_area_estimate_matrix(error_matrix, wh), axis=0)", "def getArea(self, p1, p2, p3):\n matrix = [p1.normalVector, p2.normalVector, p3.normalVector, [1,1,1,1]]\n matrix = np.rot90(matrix)\n return abs(np.linalg.det(matrix))/2.0", "def compute_mesh_area_smart(mesh):\n mesh_surface_area = mesh.area\n return mesh_surface_area", "def moments(data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def _handle_amcl_pose(self, data):\n cov = data.pose.covariance\n if cov != None and len(data.pose.covariance) == 36:\n try:\n cov = np.reshape(cov,(6,6))\n if cov.shape[0] == 6 and cov.shape[1] == 6:\n a, b, _ = self._calc_ellipse(cov)\n\n self.area_ellips = a * b * math.pi\n else:\n rospy.loginfo(\"shape wrong\")\n except:\n rospy.loginfo(\"covariance exception\")\n else:\n rospy.logerr(\"wrong length of array\")", "def calculate_area(boxes):\n box_dimension = len(boxes.size())\n if (box_dimension == 1) and (boxes.size()[0] != 0):\n return (boxes[3] - boxes[1] + 1) * (boxes[2] - boxes[0] + 1)\n elif box_dimension == 2:\n return (boxes[:, 3] - boxes[:, 1] + 1) * (boxes[:, 2] - boxes[:, 0] + 1)\n else:\n return torch.tensor([])", "def total_area(self) :\n area = 0\n for i in self.residues :\n area += i.solvent_acc_area\n return area", "def moments(data,x0=None,y0=None):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n if x0 is None:\n return height, x, y, width_x, width_y, 0.0, 0.0\n else:\n xstep = x0[1] - x0[0]\n ystep = y0[1] - y0[0]\n return height, x*xstep+x0[0], y*ystep+y0[0], width_x*xstep, width_y*ystep, 0.0, 0.0", "def contour_detect(img_bin, min_area=0, max_area=-1, wh_ratio=2.0):\n rects = []\n _, contours, _ = cv2.findContours(img_bin.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n if len(contours) == 0:\n return rects\n\n max_area = img_bin.shape[0]*img_bin.shape[1] if max_area<0 else max_area\n for contour in contours:\n area = cv2.contourArea(contour)\n if area >= min_area and area <= max_area:\n x, y, w, h = cv2.boundingRect(contour)\n if 1.0*w/h < wh_ratio and 1.0*h/w < wh_ratio:\n rects.append([x,y,w,h])\n return rects", "def area(self):\n return np.array([f.area() 
for f in self])", "def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)", "def center_directions(contours, image: ndarray):\n return contours_.contour_average_center(contours)", "def area(self):\n return self._ned_shape.area", "def centroid(self):\n return self.contours_to_matrix().mean(axis=0)", "def surface_area(DEM, resolution):\n\n resolution_squared = resolution ** 2.\n cross_distance_squared = 2.0 * (resolution ** 2.)\n\n m1 = ((DEM[0:-1, 0:-1] - DEM[0:-1, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m2 = ((DEM[0:-1, 0:-1] - DEM[1:, 0:-1]) ** 2.0 + resolution_squared) ** 0.5\n m3 = ((DEM[0:-1, 0:-1] - DEM[1:, 1:]) ** 2.0 + cross_distance_squared) ** 0.5\n m4 = ((DEM[0:-1, 1:] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m5 = ((DEM[1:, 0:-1] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n\n #from pdb import set_trace; set_trace()\n # Heron's formula for computing the area of a triangle, knowing 3 sides lengths,\n # requires a semiperimeter variable \"s\"\n s1 = 0.5 * (m3 + m5 + m2)\n s2 = 0.5 * (m3 + m4 + m1)\n\n # Calculate area using Heron's formula. This computes the upper and lower triangle area for each set of 4 dem points\n area = np.sum(np.sqrt(s1 * (s1 - m3) * (s1 - m5) * (s1 - m2))) + np.sum(np.sqrt(s2 * (s2 - m3) * (s2 - m4) * (s2 - m1)))\n\n return area", "def area(self):\n\n return (self.x1 - self.x0) * (self.y1 - self.y0)", "def get_centroid(M):\t\n\treturn int(M['m10']/M['m00']), int(M['m01']/M['m00'])", "def center_of_mass(mask):\n M = cv2.moments(mask)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return [int(cX), int(cY)]", "def _get_filter_target_area(self, shape, dim):\n if shape == \"circle\":\n miholedim = 0.5 * dim\n maholedim = 1.25 * dim\n mi = miholedim**2 * 3.1415\n ma = maholedim**2 * 3.1415\n else:\n d = (2 * dim) ** 2\n mi = 0.5 * d\n ma = 1.25 * d\n\n return mi, ma", "def area(bboxes: np.array) -> np.ndarray:\n if bboxes.ndim == 1:\n bboxes = np.expand_dims(bboxes, axis=0)\n w = (bboxes[:, 2] - bboxes[:, 0]).clip(min=0)\n h = (bboxes[:, 3] - bboxes[:, 1]).clip(min=0)\n return w * h", "def get_centroid(image, method=\"propio\"):\n # ---------Método Propio (directo de la definición)----------\n if method == \"propio\":\n # Dimensiones\n height, width = image.shape[:2]\n # Masa total\n total_mass = image.sum()\n\n # Si la masa total es cero, entonces el centro de masa \n # no existe\n if total_mass == 0:\n r = np.array([-1, -1])\n return r, None\n\n # Primera componente (suma por filas)\n row_sum = image.sum(axis=1)\n row_weight = np.arange(1, height+1)\n r_i = np.dot(row_sum, row_weight)\n r_i /= total_mass\n r_i = int(r_i)\n \n # Segunda componente (suma por columnas)\n column_sum = image.sum(axis=0)\n column_weight = np.arange(1, width+1)\n r_j = np.dot(column_sum, column_weight)\n r_j /= total_mass\n r_j = int(r_j)\n\n # Retorna el centroide en coordenadas de imagen\n r = np.array([r_j, r_i])\n return r, None\n\n # ---------Método con contornos-----------------\n else:\n # Obtener contorno imagen binaria (máscara)\n cnts = get_contours(image)\n \n # Para cada contorno, obtener el centroide y añadirlo a lista\n r = []\n for c in cnts:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n r.append(np.array([cX, cY]))\n\n # Ahora se retorna una lista con centroides (según la \n # 
cantidad de contornos que se hayan encontrado)\n if len(r) == 0:\n r.append(np.array([-1, -1]))\n return r, cnts\n else:\n return r, cnts", "def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. 
\r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic", "def getCharacterContour(space, debug=False):\r\n\r\n contours = cv2.findContours(space, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n contours = imutils.grab_contours(contours)\r\n\r\n # Sort contours by area, descending\r\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\r\n\r\n totalArea = space.shape[0] * space.shape[1]\r\n\r\n for contour in contours:\r\n\r\n area = cv2.contourArea(contour)\r\n hull = cv2.convexHull(contour)\r\n hull_area = cv2.contourArea(hull)\r\n\r\n if debug:\r\n cv2.drawContours(space, contours, 0, (128, 255, 60), 2)\r\n cv2.imshow('space', space)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n if cv2.contourArea(cv2.convexHull(contour)) < totalArea * 0.05:\r\n break\r\n\r\n if cv2.contourArea(cv2.convexHull(contour)) < totalArea * 0.95:\r\n return contour\r\n\r\n return None", "def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid", "def area(self):\n num_rows = self.row_end - self.row_start\n num_cols = self.col_end - self.col_start\n area = num_rows*num_cols\n return area", "def get_artif_area(self):\n result = self.cities.all().aggregate(total=Sum(\"surface_artif\"))\n return result[\"total\"] or 0", "def get_cntr_points_center(self, contour, img_size):\n res_arr = []\n \n # first border line\n for row in range(img_size):\n tmp_arr = []\n curr_row = row\n curr_col = 0\n while curr_row != -1:\n if cv2.pointPolygonTest(contour,(curr_row,curr_col),True)> 0:\n tmp_arr.append([curr_row,curr_col])\n curr_row = curr_row -1\n curr_col = curr_col +1\n if len(tmp_arr):\n res_arr.append(tmp_arr)\n \n # second border line\n for row in range(img_size):\n tmp_arr = []\n curr_row = row\n curr_col = img_size -1\n while curr_row != img_size:\n if 
cv2.pointPolygonTest(contour,(curr_row,curr_col),True)> 0:\n tmp_arr.append([curr_row,curr_col])\n curr_row = curr_row +1\n curr_col = curr_col -1\n if len(tmp_arr):\n res_arr.append(tmp_arr)\n return res_arr", "def goodPlace(contour):\n perimeter = cv2.arcLength(contour, True)\n x, y, w, h = cv2.boundingRect(contour)\n if y<=0 or x<=0:\n return False\n elif y+h >=2016 or x+w>=3840:\n return False\n return True", "def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)", "def get_contours(mask, threshold_area):\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n return [x for x in contours if cv2.contourArea(x) > threshold_area], hierarchy", "def get_contours(mask, threshold_area):\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n return [x for x in contours if cv2.contourArea(x) > threshold_area], hierarchy", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area" ]
[ "0.7210734", "0.66500163", "0.6628211", "0.6365675", "0.614254", "0.61276436", "0.6062467", "0.5952984", "0.5937475", "0.59279317", "0.58706164", "0.57940084", "0.5752224", "0.5744717", "0.5743859", "0.5743859", "0.5719784", "0.5644189", "0.56355894", "0.5590524", "0.5581619", "0.5569854", "0.5557568", "0.5541649", "0.5538348", "0.55332536", "0.5527276", "0.55238736", "0.5488976", "0.5474375", "0.54713875", "0.5465744", "0.5433992", "0.5430834", "0.5426386", "0.54259884", "0.54259074", "0.5418764", "0.54171515", "0.54112965", "0.5404042", "0.5378403", "0.5374762", "0.53746516", "0.5369757", "0.53607893", "0.53587395", "0.53479296", "0.5346612", "0.5332544", "0.5325353", "0.53115094", "0.5310084", "0.52964735", "0.52919424", "0.52911556", "0.5281153", "0.5281153", "0.52768254", "0.52704996", "0.5269928", "0.5262848", "0.52624047", "0.52571404", "0.5256783", "0.5251306", "0.524659", "0.5238739", "0.5220585", "0.52091575", "0.520506", "0.5195083", "0.5190842", "0.51657367", "0.5152045", "0.5135836", "0.513345", "0.5127674", "0.51251036", "0.5123371", "0.5122107", "0.5107858", "0.5094603", "0.5089942", "0.50716805", "0.50670266", "0.5062404", "0.50609094", "0.5059977", "0.50567615", "0.50534374", "0.50456303", "0.5042568", "0.5041963", "0.5039512", "0.503871", "0.50331295", "0.50329596", "0.50329596", "0.502788" ]
0.7479222
0
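
A recurring pattern in the negatives above is computing a triangle's area either from the cross product of two edge vectors (as in compute_area) or via Heron's formula. A minimal NumPy sketch of both approaches, for reference; the function names and test points are illustrative, not taken from the dataset:

import math
import numpy as np

def triangle_area_cross(p1, p2, p3):
    # Area = 0.5 * |(p2 - p1) x (p3 - p1)| for points in 3D.
    a = np.asarray(p2, dtype=float) - np.asarray(p1, dtype=float)
    b = np.asarray(p3, dtype=float) - np.asarray(p1, dtype=float)
    return 0.5 * np.linalg.norm(np.cross(a, b))

def triangle_area_heron(p1, p2, p3):
    # Heron's formula: S = sqrt(p * (p - a) * (p - b) * (p - c)),
    # where a, b, c are the side lengths and p is the semiperimeter.
    a = np.linalg.norm(np.asarray(p2, dtype=float) - np.asarray(p3, dtype=float))
    b = np.linalg.norm(np.asarray(p1, dtype=float) - np.asarray(p3, dtype=float))
    c = np.linalg.norm(np.asarray(p1, dtype=float) - np.asarray(p2, dtype=float))
    p = 0.5 * (a + b + c)
    return math.sqrt(max(p * (p - a) * (p - b) * (p - c), 0.0))

# Both agree on a non-degenerate triangle, e.g. the unit right triangle:
assert abs(triangle_area_cross((0, 0, 0), (1, 0, 0), (0, 1, 0))
           - triangle_area_heron((0, 0, 0), (1, 0, 0), (0, 1, 0))) < 1e-9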
Calculate the bounding rectangle. It is a straight rectangle that doesn't consider the rotation of the object, so the area of the bounding rectangle won't be minimal. It is found by the function cv2.boundingRect().
def __CalculateBoundingBox(self, contour):
    return cv2.boundingRect(contour)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundingRect(self):\n return self.rect().adjusted(-2, -2, 2, 2)", "def boundingRect(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\treturn {\"x\":x, \"y\": y, \"w\": w, \"h\": h}", "def boundingBox(self):\n minx, miny, maxx, maxy = self.substrates.bounds\n return pcbnew.BOX2I(\n pcbnew.VECTOR2I(int(minx), int(miny)),\n pcbnew.VECTOR2I(int(maxx - minx), int(maxy - miny)))", "def get_bounding_box(current_building_contour):\n x, y, w, h, = cv.boundingRect(current_building_contour[0])\n return x, y, w, h", "def boundingBoxArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def boundingRect(self) -> QRectF:\n return self._rect.adjusted(-10, -10, 10, 10)", "def boundingRect(self):\n return QRectF(-self.innerRectangleSize + self.edge_size,\n -self.innerRectangleSize + self.edge_size,\n (self.innerRectangleSize * 2) - (self.edge_size * 2),\n (self.innerRectangleSize * 2) - (self.edge_size * 2)).normalized()", "def boundingRect(self):\n extra = self._halfLength / 2.0\n return QRectF(self._origin, QSizeF(self._end.x() - self._origin.x(),\n self._end.y() - self._origin.y())\n ).normalized().adjusted(-extra, -extra, extra, extra)", "def get_bounding_box(conture, img=None):\n\t# get approx, return index\n\t# epsilon = 0.1 * cv2.arcLength(x, True)\n\t# approx_box = cv2.approxPolyDP(x, epsilon, True)\n\t# print 'app box', approx_box # Min [[[ 56 85]] [[318 231]]]\n\t# leftpointX = approx_box[0][0][0]\n\t# print 'app box 2', leftpointX # Min [[[ 56 85]] Max [[318 231]]]\n\t# approx_box_s = int(0.9*approx_box)\n\t# print 'app box s',approx_box_s\n\t\n\t# get rectangle\n\tx, y, w, h = cv2.boundingRect(conture) # x,y: top-left coordinate\n\t# draw rectangle\n\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\tcv2.waitKey(10)\n\treturn (x, y, w, h)", "def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]", "def get_bounding_box(uv_coor, shape):\r\n\txmin = ymin = 99999\r\n\txmax = ymax = 0\r\n\tfor x, y in uv_coor:\r\n\t\txmin = min(xmin, int(x))\r\n\t\txmax = max(xmax, int(x))\r\n\t\tymin = min(ymin, int(y))\r\n\t\tymax = max(ymax, int(y))\r\n\txmin = max(0, xmin - 20)\r\n\tymin = max(0, ymin - 20)\r\n\r\n\txmax = min(shape[1], xmax + 20)\r\n\tymax = min(shape[0], ymax + 20)\r\n\r\n\treturn xmin, xmax, ymin, ymax", "def boundingRectPoints(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\tfirst = (x, y)\n\tend = (x+w, y+h)\n\treturn {\"top-left\": first, \"bottom-right\":end}", "def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out 
of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb", "def bounding_box(self):\n return None", "def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()", "def bounding_box(self):\n # We use the solution described in\n # https://stackoverflow.com/a/14163413\n cos_theta = np.cos(self.angle)\n sin_theta = np.sin(self.angle)\n width_x = 0.5 * self.width * cos_theta\n width_y = 0.5 * self.width * sin_theta\n height_x = 0.5 * self.height * -sin_theta\n height_y = 0.5 * self.height * cos_theta\n dx = np.sqrt(width_x**2 + height_x**2)\n dy = np.sqrt(width_y**2 + height_y**2)\n\n xmin = self.center.x - dx\n xmax = self.center.x + dx\n ymin = self.center.y - dy\n ymax = self.center.y + dy\n\n return RegionBoundingBox.from_float(xmin, xmax, ymin, ymax)", "def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))", "def boundingRect(self):\n # TODO (#2398) this rectangle makes no sense, it should be\n # top left x, top left y, width, height. But for some reason\n # that doesn't play nicely with the coordinate system.\n #\n # Instead it is bottom left x, bottom left y, width height.\n return QtCore.QRectF(-9000, -6000, 18000, 12000)", "def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))", "def rgb_image_bounding_box(image_full_path, boundingBox, convert_bgr=False, autocrop=False):\n imgraw = cv2.imread(image_full_path, 1)\n if len(boundingBox) > 0:\n imgraw = imgraw[boundingBox[1]:boundingBox[3], boundingBox[0]:boundingBox[2], :]\n\n if autocrop:\n imgshape = imgraw.shape\n mindim = np.argmin([imgshape[0], imgshape[1]])\n cropdim = mindim\n boundingBox = [0, 0, imgshape[1], imgshape[0]]\n xtra = np.abs(imgshape[0] - imgshape[1])\n boundingBox[cropdim] = xtra // 2\n boundingBox[cropdim + 2] -= xtra // 2\n imgcrop = imgraw[boundingBox[1]:boundingBox[3], boundingBox[0]:boundingBox[2], :]\n else:\n imgcrop = imgraw\n\n if convert_bgr:\n imgcrop = cv2.cvtColor(imgcrop, cv2.COLOR_BGR2RGB)\n return imgcrop", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def _get_bounding_box(self, frame, bounding_offset):\n\n # Try to find board if the boundingbox is not set\n center, ellipse, mask = self.board.detect(frame)\n\n # Should not be None\n if center is None:\n print(\"skipping frame\")\n return None\n if ellipse is None:\n print(\"skipping frame\")\n return None\n if mask is None:\n print(\"skipping frame\")\n return None\n\n self.point_mask = mask\n # cv2.imshow(\"mask\", mask)\n\n x_offset = (ellipse[1][0] / 2)\n x_center = ellipse[0][0]\n\n y_offset = ellipse[1][1] / 2\n y_center = ellipse[0][1]\n\n minx = max(0, x_center - x_offset 
- bounding_offset)\n maxx = min(self.width, x_center + x_offset + bounding_offset)\n miny = max(0, y_center - y_offset - bounding_offset)\n maxy = min(self.height, y_center + y_offset + bounding_offset)\n return ((int(minx), int(miny)), (int(maxx), int(maxy)))", "def _get_rounded_bounding_box(\n geom: BasePolygon, width: Numeric\n ) -> Tuple[int, int, int, int]:\n return (\n geom.bounds[0] - (geom.bounds[0] % width),\n geom.bounds[1] - (geom.bounds[1] % width),\n geom.bounds[2] + (-geom.bounds[2] % width),\n geom.bounds[3] + (-geom.bounds[3] % width),\n )", "def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])", "def boundingRect(self):\n return QRectF()", "def getbbox(self):\r\n img_ = (self._instance > 0)\r\n rows = np.any(img_, axis=1)\r\n cols = np.any(img_, axis=0)\r\n rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))\r\n cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))\r\n return (rmin, rmax, cmin, cmax)", "def compute_bounding_box(homography, w, h):\n corners = np.array([[0,0],\n [0,h],\n [w,0],\n [w,h]])\n t_corners = apply_homography(corners, homography)\n return np.array([t_corners.min(axis=0),t_corners.max(axis=0)],\n dtype= np.int)\n #[top-left, btm-right]", "def draw_bounding_box(self):\n # Gets the bounding box\n xmin, ymin, xmax, ymax = self.get_bounding_box()\n\n # Gets the actual coordinates\n width = xmax - xmin\n height = ymax - ymin\n center_x = xmin + (width)/2\n center_y = ymin + (height)/2\n\n arcade.draw_rectangle_outline(center_x, center_y, width, height, (255, 0, 0))", "def getRectangularKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_RECT, size)", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if self.rotation is None or self.rotation % 90 == 0:\n cell_bbox = self.ref_cell.get_bounding_box()\n if cell_bbox is None:\n return None\n polygons = self._transform_polygons([cell_bbox])\n else:\n # For non-cardinal rotations of a reference, we must use the\n # flattened polygons for the reference\n polygons = self.get_polygons()\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(\n (\n (all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max()),\n )\n )\n return bb", "def canvas_bounds(self) -> utils.BoxRegion:", "def get_bounding_box(vehicle, camera):\n\n bb_cords = ClientSideBoundingBoxes._create_bb_points(vehicle)\n cords_x_y_z = ClientSideBoundingBoxes._vehicle_to_sensor(bb_cords, vehicle, camera)[:3, :]\n cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])\n bbox = 
np.transpose(np.dot(camera.calibration, cords_y_minus_z_x))\n camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)\n return camera_bbox", "def rectArea(base, height):\n return base * height", "def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)", "def getBoundingBox(self):\n lX, lY = self.lX(), self.lY()\n return min(lX), min(lY), max(lX), max(lY)", "def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n\n if self.rotation is None or self.rotation % 90 == 0:\n cell_bbox = self.ref_cell.get_bounding_box()\n if cell_bbox is None:\n return None\n polygons = self._transform_polygons([cell_bbox])\n else:\n # For non-cardinal rotations of a reference, we must use the\n # flattened polygons for the reference\n polygons = self.get_polygons()\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(\n (\n (all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max()),\n )\n )\n return bb", "def _decode_bbox(self, normalized_bbox):\n #apply the inverse of transformation\n y1,x1,y2,x2 = preprocess.apply_transformation(normalized_bbox,\n np.linalg.inv(self.transformation))\n\n w,h = self.image_size\n y1,x1,y2,x2 = y1*h,x1*w,y2*h,x2*w\n return vot.Rectangle(x1,y1,x2-x1,y2-y1)", "def rect_area(rect):\n return rect[2] * rect[3]", "def area_rect(w, h):\n return w * h", "def get_bounding_box(self):\n if len(self.elements) == 0:\n return None\n if not (self._bb_valid and\n all(ref._bb_valid for ref in self.get_dependencies(True))):\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for element in self.elements:\n if isinstance(element, PolygonSet):\n all_polygons.extend(element.polygons)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n element_bb = element.get_bounding_box()\n if element_bb is not None:\n bb[0, 0] = min(bb[0, 0], element_bb[0, 0])\n bb[0, 1] = min(bb[0, 1], element_bb[0, 1])\n bb[1, 0] = max(bb[1, 0], element_bb[1, 0])\n bb[1, 1] = max(bb[1, 1], element_bb[1, 1])\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bb_valid = True\n _bounding_boxes[self] = bb\n return _bounding_boxes[self]", "def get_bounding_rect(polygon):\n x1, y1, x2, y2 = float('inf'), float('inf'), float('-inf'), float('-inf')\n for x, y in polygon:\n if x < x1:\n x1 = x\n if y < y1:\n y1 = y\n if x > x2:\n x2 = x\n if y > y2:\n y2 = y\n return x1, y1, x2, y2", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_rect(self, sample):\n x1, x2 = sample['previmg'], sample['currimg']\n x1 = x1.unsqueeze(0).to(self.device)\n x2 = x2.unsqueeze(0).to(self.device)\n y = self.model(x1, x2)\n bb = y.data.cpu().numpy().transpose((1, 0))\n bb = bb[:, 0]\n bbox = BoundingBox(bb[0], bb[1], bb[2], bb[3])\n\n # inplace conversion\n 
bbox.unscale(self.opts['search_region'])\n bbox.uncenter(self.curr_img, self.opts['search_location'],\n self.opts['edge_spacing_x'], self.opts['edge_spacing_y'])\n return bbox.get_bb_list()", "def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]", "def __CalculateRotatedBox(self, contour):\r\n rectangle = cv2.minAreaRect(contour)\r\n box = cv2.boxPoints(rectangle)\r\n return np.int0(box)", "def compute_bounding_box(homography, w, h):\n\n # todo in what step to do int??\n # todo should i do -1? if so why?\n\n\n all_corners = np.array([[0, 0], [w, 0], [0, h], [w, h]])\n all_corners = apply_homography(all_corners,homography)\n top_left = [np.min(all_corners[:,0]),np.min(all_corners[:,1])]\n bottom_right = [np.max(all_corners[:,0]),np.max(all_corners[:,1])]\n return np.array([top_left,bottom_right]).astype(np.int)", "def bounding_box(vertices, (height, width), extend=5):\n x_min = min(x for x, y in vertices) - extend\n x_max = max(x for x, y in vertices) + extend\n y_min = min(y for x, y in vertices) - extend\n y_max = max(y for x, y in vertices) + extend\n \n return max(x_min, 0), min(x_max, width), max(y_min, 0), min(y_max, height)", "def isRectangle( pathGroup):\n #print 'xxxxxxxx isRectangle',pathGroups\n if isinstance(pathGroup, Circle ): return None\n segmentList = [p for p in pathGroup.listOfPaths if p.isSegment() ]#or p.effectiveNPoints >0]\n if len(segmentList) != 4:\n debug( 'rectangle Failed at length ', len(segmentList))\n return None\n a,b,c,d = segmentList\n\n if length(a.point1, d.pointN)> 0.2*(a.length+d.length)*0.5:\n debug('rectangle test failed closing ', length(a.point1, d.pointN), a.length, d.length)\n return None\n \n Aac , Abd = closeAngleAbs(a.angle,c.angle), closeAngleAbs(b.angle , d.angle)\n if min(Aac,Abd) > 0.07 or max(Aac, Abd) >0.27 :\n debug( 'rectangle Failed at angles', Aac, Abd)\n return None\n notsimilarL = lambda d1,d2: abs(d1-d2)>0.20*min(d1,d2)\n\n pi , twopi = numpy.pi,2*numpy.pi\n angles = numpy.array( [p.angle for p in segmentList] )\n minAngleInd = numpy.argmin( numpy.minimum( abs(angles), abs( abs(angles)-pi), abs( abs(angles)-twopi) ) )\n rotAngle = angles[minAngleInd]\n width = (segmentList[minAngleInd].length + segmentList[(minAngleInd+2)%4].length)*0.5\n height = (segmentList[(minAngleInd+1)%4].length + segmentList[(minAngleInd+3)%4].length)*0.5\n # set rectangle center as the bbox center\n x,y,w,h = computeBox( numpy.concatenate( [ p.points for p in segmentList]) )\n r = Rectangle( numpy.array( [x+w/2, y+h/2]), (width, height), rotAngle, pathGroup.listOfPaths, pathGroup.refNode)\n \n debug( ' found a rectangle !! 
', a.length, b.length, c.length, d.length )\n return r", "def bounding_box(self):\n return self.ink_width, self.ink_height", "def calc_bounding_box(self):\n self.BB = self.geos.abs_el(0).BB\n for geo in self.geos.abs_iter():\n self.BB = self.BB.joinBB(geo.BB)", "def extract_bounding_boxes(self, scene):\n objs = scene[\"objects\"]\n rotation = scene[\"directions\"][\"right\"]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n\n for i, obj in enumerate(objs):\n [x, y, z] = obj[\"pixel_coords\"]\n\n [x1, y1, z1] = obj[\"3d_coords\"]\n\n cos_theta, sin_theta, _ = rotation\n\n x1 = x1 * cos_theta + y1 * sin_theta\n y1 = x1 * -sin_theta + y1 * cos_theta\n\n height_d = 6.9 * z1 * (15 - y1) / 2.0\n height_u = height_d\n width_l = height_d\n width_r = height_d\n\n if obj[\"shape\"] == \"cylinder\":\n d = 9.4 + y1\n h = 6.4\n s = z1\n\n height_u *= (s * (h / d + 1)) \\\n / ((s * (h / d + 1)) - (s * (h - s) / d))\n height_d = height_u * (h - s + d) / (h + s + d)\n\n width_l *= 11 / (10 + y1)\n width_r = width_l\n\n if obj[\"shape\"] == \"cube\":\n height_u *= 1.3 * 10 / (10 + y1)\n height_d = height_u\n width_l = height_u\n width_r = height_u\n\n ymin.append((y - height_d) / 320.0)\n ymax.append((y + height_u) / 320.0)\n xmin.append((x - width_l) / 480.0)\n xmax.append((x + width_r) / 480.0)\n\n return xmin, ymin, xmax, ymax", "def clip(self, B):\n # /* Left */\n if ((self.x >= B.x) and (self.x < (B.x + B.w))):\n x = self.x\n elif ((B.x >= self.x) and (B.x < (self.x + self.w))):\n x = B.x\n else:\n return Rect(self.x, self.y, 0, 0)\n\n # /* Right */\n if (((self.x + self.w) > B.x) and ((self.x + self.w) <= (B.x + B.w))):\n w = (self.x + self.w) - x\n elif (((B.x + B.w) > self.x) and ((B.x + B.w) <= (self.x + self.w))):\n w = (B.x + B.w) - x\n else:\n return Rect(self.x, self.y, 0, 0)\n\n # /* Top */\n if ((self.y >= B.y) and (self.y < (B.y + B.h))):\n y = self.y\n elif ((B.y >= self.y) and (B.y < (self.y + self.h))):\n y = B.y\n else:\n return Rect(self.x, self.y, 0, 0)\n\n # /* Bottom */\n if (((self.y + self.h) > B.y) and ((self.y + self.h) <= (B.y + B.h))):\n h = (self.y + self.h) - y\n elif (((B.y + B.h) > self.y) and ((B.y + B.h) <= (self.y + self.h))):\n h = (B.y + B.h) - y\n else:\n return Rect(self.x, self.y, 0, 0)\n\n return Rect(x, y, w, h)", "def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection, self.columns, self.rows, self.spacing[0],\n self.spacing[1])\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))", "def getLocation(bounding_box):\n ymin, xmin, ymax, xmax = bounding_box\n w=1280\n h=720\n left, right, top, bottom = (xmin * w, xmax * w,\n ymin * h, ymax * h)\n ###############################################\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, 
np.floor(left + 0.5).astype('int32'))\n bottom = min(h, np.floor(bottom + 0.5).astype('int32'))\n right = min(w, np.floor(right + 0.5).astype('int32'))\n # print(label, (left, top), (right, bottom))\n \n return int((left + right) / 2.0), int((top + bottom) / 2.0)\n \n # xlt, ylt, xrb, yrb = bounding_box\n # return int((xlt + xrb) / 2.0), int((ylt + yrb) / 2.0)", "def contour_detect(img_bin, min_area=0, max_area=-1, wh_ratio=2.0):\n rects = []\n _, contours, _ = cv2.findContours(img_bin.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n if len(contours) == 0:\n return rects\n\n max_area = img_bin.shape[0]*img_bin.shape[1] if max_area<0 else max_area\n for contour in contours:\n area = cv2.contourArea(contour)\n if area >= min_area and area <= max_area:\n x, y, w, h = cv2.boundingRect(contour)\n if 1.0*w/h < wh_ratio and 1.0*h/w < wh_ratio:\n rects.append([x,y,w,h])\n return rects", "def getContourBound(contour):\n (x,y,w,h) = cv2.boundingRect(contour)\n return (x,y,w,h)", "def _compute_bounding_box(self, points_2d):\n max_x = max(map(lambda point: int(point[0]), points_2d))\n min_x = min(map(lambda point: int(point[0]), points_2d))\n max_y = max(map(lambda point: int(point[1]), points_2d))\n min_y = min(map(lambda point: int(point[1]), points_2d))\n\n width = max_x - min_x + 1\n height = max_y - min_y + 1\n\n return [min_x, min_y, width, height]", "def bbox(img):\n a = np.where(img != 0)\n bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n return bbox", "def get_yolo_bbox(width, height, c_x, c_y, w_r, h_r):\n x1 = int((c_x - w_r/2) * width)\n y1 = int((c_y - h_r/2) * height)\n\n x2 = int((c_x + w_r/2) * width)\n y2 = int((c_y + h_r/2) * height)\n\n\n p_leftEnd = x1, y1\n p_rightEnd = x2, y2\n\n return p_leftEnd, p_rightEnd", "def boundingbox(self, *args, **kwargs):\n return _image.image_boundingbox(self, *args, **kwargs)", "def bbox(self):\n return self.get_bounding_box()", "def rectangle_area(base, height):\n return (base * height)", "def bbox(self, obj):\n if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n obj = obj.bbox\n\n # phy -> abs\n mn0 = self.master.xy >= obj[0] ## ge than lower left\n mn1 = self.master.xy <= obj[1] ## le than upper right\\\n\n\n return np.array([mn0, mn1])", "def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if (self.rotation is None and self.magnification is None and\n self.x_reflection is None):\n key = self\n else:\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection)\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))", "def get_bounding_box(self):\n deps_still_valid = all(ref._bb_valid for ref in self.get_dependencies(True))\n cached_bbox_still_valid = self._bb_valid and deps_still_valid\n if not cached_bbox_still_valid:\n bb = 
numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for polygon in self.polygons:\n all_polygons.extend(polygon.polygons)\n for path in self.paths:\n all_polygons.extend(path.to_polygonset().polygons)\n for reference in self.references:\n reference_bb = reference.get_bounding_box()\n if reference_bb is not None:\n all_polygons.append(reference_bb)\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bounding_box = bb\n else:\n self._bounding_box = None\n self._bb_valid = True\n\n if self._bounding_box is None:\n return None\n else:\n # return a *copy* of the cached bounding box to ensure it doesn't get inadvertently modified\n return numpy.array(self._bounding_box)", "def minimum_bounding_rectangle(points):\n from scipy.ndimage.interpolation import rotate\n pi2 = np.pi / 2.\n\n # get the convex hull for the points\n hull_points = points[ConvexHull(points).vertices]\n\n # calculate edge angles\n edges = np.zeros((len(hull_points) - 1, 2))\n edges = hull_points[1:] - hull_points[:-1]\n\n angles = np.zeros((len(edges)))\n angles = np.arctan2(edges[:, 1], edges[:, 0])\n\n angles = np.abs(np.mod(angles, pi2))\n angles = np.unique(angles)\n\n # find rotation matrices\n # XXX both work\n rotations = np.vstack([\n np.cos(angles),\n np.cos(angles - pi2),\n np.cos(angles + pi2),\n np.cos(angles)]).T\n # rotations = np.vstack([\n # np.cos(angles),\n # -np.sin(angles),\n # np.sin(angles),\n # np.cos(angles)]).T\n rotations = rotations.reshape((-1, 2, 2))\n\n # apply rotations to the hull\n rot_points = np.dot(rotations, hull_points.T)\n\n # find the bounding points\n min_x = np.nanmin(rot_points[:, 0], axis=1)\n max_x = np.nanmax(rot_points[:, 0], axis=1)\n min_y = np.nanmin(rot_points[:, 1], axis=1)\n max_y = np.nanmax(rot_points[:, 1], axis=1)\n\n # find the box with the best area\n areas = (max_x - min_x) * (max_y - min_y)\n best_idx = np.argmin(areas)\n\n # return the best box\n x1 = max_x[best_idx]\n x2 = min_x[best_idx]\n y1 = max_y[best_idx]\n y2 = min_y[best_idx]\n r = rotations[best_idx]\n\n rval = np.zeros((4, 2))\n rval[0] = np.dot([x1, y2], r)\n rval[1] = np.dot([x2, y2], r)\n rval[2] = np.dot([x2, y1], r)\n rval[3] = np.dot([x1, y1], r)\n\n return rval", "def bounding_box(self):\n if self.bbox is None:\n self.bbox = bounding_box(self)\n return self.bbox", "def bounding_box(self):\n if self.bbox is None:\n self.bbox = bounding_box(self)\n return self.bbox", "def bounding_box(points):\n x, y, w, h = cv2.boundingRect(np.array([p for p in points]))\n bounding = Box(x, y, w, h)\n return bounding", "def calculate_black_rectangle(self, rectangle):\n result = self.count_black[rectangle.lower_right[0], rectangle.lower_right[1]] - \\\n self.count_black[rectangle.upper_right[0], rectangle.upper_right[1]] - \\\n self.count_black[rectangle.lower_left[0], rectangle.lower_left[1]] + \\\n self.count_black[rectangle.upper_left[0], rectangle.upper_left[1]]\n return result", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def to_bounding_box(self):\n if self.bbox is not None:\n return self.bbox\n from .bbox import BBox\n\n xx = self.xx\n yy = self.yy\n self.bbox = BBox(xmin=min(xx), xmax=max(xx), ymin=min(yy), ymax=max(yy), label=self.label, **self.fields)\n return self.bbox", "def 
get_aabb(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, s], [s, c]])\n return rect(self.get_com(), np.abs(np.dot(r, self.halfexts)), 0)", "def bounding_box(primitive):\n\n if primitive[\"shape\"] == \"circle\":\n bbox = [[primitive[\"center\"][0] - primitive[\"radius\"],\n primitive[\"center\"][1] - primitive[\"radius\"]],\n [primitive[\"center\"][0] + primitive[\"radius\"],\n primitive[\"center\"][1] + primitive[\"radius\"]]]\n else:\n x_coords, y_coords = zip(*primitive[\"vertices\"])\n bbox = [[min(x_coords), min(y_coords)],\n [max(x_coords), max(y_coords)]]\n\n primitive[\"bounding_box\"] = bbox\n return primitive", "def _boundRect(self):\n addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in self.footprints])\n self.upperleft = list(map(min, zip(*addresstamp)))\n self.bottomright = list(map(max, zip(*addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def getbounds(self):\n return pygame.Rect(self.rect)", "def compute_bb(self) -> List[float]:\n all_shapes = list(self.parts.values())\n bbox_vertices = unary_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x, min_y, max_y]", "def get_bbox(self, primitive):\n accessor = primitive.attributes.get(\"POSITION\")\n return accessor.min, accessor.max", "def GetBoundingRect(self, item, textOnly=False):\r\n \r\n i = item\r\n\r\n startX, startY = self.GetViewStart()\r\n rect = wx.Rect()\r\n\r\n rect.x = i.GetX() - startX*_PIXELS_PER_UNIT\r\n rect.y = i.GetY() - startY*_PIXELS_PER_UNIT\r\n rect.width = i.GetWidth()\r\n rect.height = self.GetLineHeight(i)\r\n\r\n return rect", "def minimum_bounding_rectangle(points: np.ndarray) -> np.ndarray:\n\n pi2 = np.pi / 2.0\n hull_points = points[ConvexHull(points).vertices]\n edges = hull_points[1:] - hull_points[:-1]\n angles = np.arctan2(edges[:, 1], edges[:, 0])\n angles = np.abs(np.mod(angles, pi2))\n angles = np.unique(angles)\n rotations = np.vstack(\n [np.cos(angles), np.cos(angles - pi2), np.cos(angles + pi2), np.cos(angles)]\n ).T\n rotations = rotations.reshape((-1, 2, 2))\n rot_points = np.dot(rotations, hull_points.T)\n min_x, max_x = np.nanmin(rot_points[:, 0], axis=1), np.nanmax(\n rot_points[:, 0], axis=1\n )\n min_y, max_y = np.nanmin(rot_points[:, 1], axis=1), np.nanmax(\n rot_points[:, 1], axis=1\n )\n areas = (max_x - min_x) * (max_y - min_y)\n best_idx = np.argmin(areas)\n x1, x2 = max_x[best_idx], min_x[best_idx]\n y1, y2 = max_y[best_idx], min_y[best_idx]\n r = rotations[best_idx]\n rval = np.zeros((4, 2))\n rval[0], rval[1] = np.dot([x1, y2], r), np.dot([x2, y2], r)\n rval[2], rval[3] = np.dot([x2, y1], r), np.dot([x1, y1], r)\n return rval", "def collide_rect(self):\n return self.rect", "def draw_bounding_box(objects,color):\n\n for i in range(len(objects)):\n x, y, w, h, d = objects[i].get_attributes()\n print(x, y, w, h, d)\n corr = get_correction(d, a, hfov, x)\n cv2.rectangle(color, (x-corr, y), (x+w-corr, y+h), (0, 255, 0), 4)\n\n try:\n real_x, real_y = 
get_dimensions(d, w, h, hfov, vfov, 640, 480)\n real_x = round(real_x, 3)\n real_y = round(real_y, 3)\n\n except:\n real_x, real_y = 'ERROR'\n\n cv2.putText(color, 'depth = ' + str(d) + 'm', (30, i*60 + 30) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'width = ' + str(real_x)+ 'm', (30, i*60 + 45) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'height = ' + str(real_y)+ 'm', (30, i*60 + 60) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n\n if(i < len(objects)-1):\n ## distance between left and right object\n distance = round(distance_between_objects(objects[i], objects[i+1], hfov, 640), 3)\n if distance > l:\n textcolor = (0, 255, 0)\n else:\n textcolor = (0, 0, 255)\n\n cv2.putText(color, 'distance between objects = ' + str(distance) + 'm',\n (320, i*60 + 70) , cv2.FONT_HERSHEY_SIMPLEX, 0.5, textcolor, 1)", "def bbox(self, obj):\n return self.phy2abs.bbox(obj)", "def get_bounding_box(image, contour=None, threshold=3):\n\tif contour is None:\n\t\tcontour = get_contour(image, threshold)\n\tx,y,w,h = cv2.boundingRect(contour)\n\treturn x,y,w,h", "def detect(self, image):\n\n max = 0\n try:\n rects = self.detector(image, 1)\n if len(rects) == 0:\n rectangle = dlib.rectangle(0, 0, image.shape[1],\n image.shape[0])\n else:\n for (i, rect) in enumerate(rects):\n if rect.area() > max:\n rectangle = rect\n return rectangle\n except RuntimeError:\n return None", "def compute_bounding_box(homography, w, h):\n top_left_x, top_left_y = apply_homography(np.array([[0, 0]]), homography).transpose()\n top_right_x, top_right_y = apply_homography(np.array([[w, 0]]), homography).transpose()\n bottem_left_x, bottem_left_y = apply_homography(np.array([[0, h]]), homography).transpose()\n bottem_right_x, bottem_right_y = apply_homography(np.array([[w, h]]), homography).transpose()\n\n min_x = min([top_left_x, top_right_x, bottem_left_x, bottem_right_x])[0]\n max_x = max([top_left_x, top_right_x, bottem_left_x, bottem_right_x])[0]\n min_y = min([top_left_y, top_right_y, bottem_left_y, bottem_right_y])[0]\n max_y = max([top_left_y, top_right_y, bottem_left_y, bottem_right_y])[0]\n return np.array([[min_x, min_y], [max_x, max_y]]).astype(int)", "def get_bounding_box(self):\n return self._domain.get_bounding_box()", "def _rect_size(self):\n bnd = self._bounds\n return (bnd[1][0] - bnd[0][0], bnd[1][1] - bnd[0][1])", "def getRect(self):\n return self.rect()", "def _boundRect(self):\n self.upperleft = list(map(min, zip(*self.addresstamp)))\n self.bottomright = list(map(max, zip(*self.addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def get_roi_rect(self):\n return self.rect_list", "def rectSize(rect):\n return np.array([rect[1] - rect[0], rect[3] - rect[2]])", "def get_upper_body_box(self, img_w, img_h):\n\n if not (img_w > 0 and img_h > 0):\n raise Exception(\"img size should be positive\")\n\n _NOSE = CocoPart.Nose.value\n _NECK = CocoPart.Neck.value\n _RSHOULDER = CocoPart.RShoulder.value\n _LSHOULDER = CocoPart.LShoulder.value\n _THRESHOLD_PART_CONFIDENCE = 0.3\n parts = [part for idx, part in self.body_parts.items() if 
part.score > _THRESHOLD_PART_CONFIDENCE]\n part_coords = [(img_w * part.x, img_h * part.y) for part in parts if\n part.part_idx in [0, 1, 2, 5, 8, 11, 14, 15, 16, 17]]\n\n if len(part_coords) < 5:\n return None\n\n # Initial Bounding Box\n x = min([part[0] for part in part_coords])\n y = min([part[1] for part in part_coords])\n x2 = max([part[0] for part in part_coords])\n y2 = max([part[1] for part in part_coords])\n\n # # ------ Adjust heuristically +\n # if face points are detcted, adjust y value\n\n is_nose, part_nose = _include_part(parts, _NOSE)\n is_neck, part_neck = _include_part(parts, _NECK)\n torso_height = 0\n if is_nose and is_neck:\n y -= (part_neck.y * img_h - y) * 0.8\n torso_height = max(0, (part_neck.y - part_nose.y) * img_h * 2.5)\n #\n # # by using shoulder position, adjust width\n is_rshoulder, part_rshoulder = _include_part(parts, _RSHOULDER)\n is_lshoulder, part_lshoulder = _include_part(parts, _LSHOULDER)\n if is_rshoulder and is_lshoulder:\n half_w = x2 - x\n dx = half_w * 0.15\n x -= dx\n x2 += dx\n elif is_neck:\n if is_lshoulder and not is_rshoulder:\n half_w = abs(part_lshoulder.x - part_neck.x) * img_w * 1.15\n x = min(part_neck.x * img_w - half_w, x)\n x2 = max(part_neck.x * img_w + half_w, x2)\n elif not is_lshoulder and is_rshoulder:\n half_w = abs(part_rshoulder.x - part_neck.x) * img_w * 1.15\n x = min(part_neck.x * img_w - half_w, x)\n x2 = max(part_neck.x * img_w + half_w, x2)\n\n # ------ Adjust heuristically -\n\n # fit into the image frame\n x = max(0, x)\n y = max(0, y)\n x2 = min(img_w - x, x2 - x) + x\n y2 = min(img_h - y, y2 - y) + y\n\n if _round(x2 - x) == 0.0 or _round(y2 - y) == 0.0:\n return None\n return {\"x\": _round((x + x2) / 2),\n \"y\": _round((y + y2) / 2),\n \"w\": _round(x2 - x),\n \"h\": _round(y2 - y)}", "def determine_bounding_box_of_rotated_box(self, box, rotation_matrix):\n\n # top left, top right, bottom left, bottom right\n p1, p2, p3, p4 = box_points(box)\n\n # rotate all the points of the box\n tp1 = calc_rotate_point_with_rotation_matrix(p1, rotation_matrix)\n tp2 = calc_rotate_point_with_rotation_matrix(p2, rotation_matrix)\n tp3 = calc_rotate_point_with_rotation_matrix(p3, rotation_matrix)\n tp4 = calc_rotate_point_with_rotation_matrix(p4, rotation_matrix)\n\n # figure out which point has the furthest x distance, and the furthest y distance\n dx1 = abs(tp1[0] - tp4[0])\n dx2 = abs(tp2[0] - tp3[0])\n dy1 = abs(tp1[1] - tp4[1])\n dy2 = abs(tp2[1] - tp3[1])\n # the width and the height is the max distance between x and y\n w, h = max(dx1, dx2), max(dy1, dy2)\n\n # x and y is the min x, and min y among all points\n x = min(tp1[0], tp2[0], tp3[0], tp4[0])\n y = min(tp1[1], tp2[1], tp3[1], tp4[1])\n\n return (x, y, w, h)", "def find_bbox_coord(point_x, point_y):\r\n is_good_rect = True\r\n bottom_x, bottom_y = [], []\r\n top_x, top_y = [], []\r\n if len(point_x) < 4:\r\n is_good_rect = False\r\n if len(point_x) == 4:\r\n out_of_repeats_x = []\r\n out_of_repeats_y = []\r\n delta = 10**(-6)\r\n for j in range(len(point_x)): # add delta for the reason of not mess in equal angles\r\n out_of_repeats_x.append(point_x[j] + delta*j)\r\n out_of_repeats_y.append(point_y[j] + delta*j)\r\n point_x, point_y = out_of_repeats_x, out_of_repeats_y\r\n \r\n quadrate_width = ((point_x[1] - point_x[0])**2+(point_y[1] - point_y[0])**2)**0.5\r\n quadrate_height = ((point_x[1] - point_x[2])**2+(point_y[1] - point_y[2])**2)**0.5\r\n aspect_ratio = quadrate_width / quadrate_height\r\n if aspect_ratio > 0.7 and aspect_ratio < 1.3:\r\n is_good_rect = 
False \r\n # print('Square. Filling it in')\r\n elif quadrate_width * quadrate_height < 100:\r\n is_good_rect = False \r\n # print('Square. Filling it in')\r\n else:\r\n # print('Rectangle')\r\n edge_x, edge_y = point_x, point_y\r\n bottom_x, bottom_y, top_x, top_y, is_good_rect = top_bottom_dots(point_x, point_y, edge_x, edge_y)\r\n \r\n elif len(point_x) > 4:\r\n # print('Polygon')\r\n out_of_repeats_x = []\r\n out_of_repeats_y = []\r\n delta = 10**(-4)\r\n for j in range(len(point_x)): # add a small delta so equal angles do not collide\r\n out_of_repeats_x.append(point_x[j] + delta*j)\r\n out_of_repeats_y.append(point_y[j] + delta*j)\r\n point_x, point_y = out_of_repeats_x, out_of_repeats_y\r\n \r\n edge_x, edge_y = find_4_dots(point_x, point_y)\r\n\r\n bottom_x, bottom_y, top_x, top_y, is_good_rect = top_bottom_dots(point_x, point_y, edge_x, edge_y)\r\n \r\n if is_good_rect:\r\n \r\n bottom_edge_x, bottom_edge_y = [], []\r\n for i in bottom_x:\r\n if i in edge_x:\r\n index = bottom_x.index(i)\r\n bottom_edge_x.append(bottom_x[index])\r\n bottom_edge_y.append(bottom_y[index])\r\n bottom_edge_x, bottom_edge_y = zip(*sorted(zip(bottom_edge_x, bottom_edge_y)))\r\n bottom_lowest_point = [bottom_edge_x[0], bottom_edge_y[0]]\r\n\r\n top_edge_x, top_edge_y = [], []\r\n for i in top_x:\r\n if i in edge_x:\r\n index = top_x.index(i)\r\n top_edge_x.append(top_x[index])\r\n top_edge_y.append(top_y[index])\r\n top_edge_x, top_edge_y = zip(*sorted(zip(top_edge_x, top_edge_y)))\r\n top_lowest_point = [top_edge_x[0], top_edge_y[0]]\r\n\r\n bottom_x, bottom_y = Euclidian_distance_sorting(bottom_x, bottom_y, bottom_lowest_point)\r\n top_x, top_y = Euclidian_distance_sorting(top_x, top_y, top_lowest_point)\r\n else:\r\n bottom_x, bottom_y, top_x, top_y = [], [], [], []\r\n \r\n return is_good_rect, bottom_x, bottom_y, top_x, top_y", "def roi_rect(self):\n return (\n self.roi_x_offset, self.roi_y_offset,\n self.roi_x_size, self.roi_y_size,\n )"
[ "0.7676917", "0.72490233", "0.71938384", "0.7107463", "0.70778424", "0.7075241", "0.6987138", "0.698335", "0.69716114", "0.691494", "0.6906388", "0.6903976", "0.68773127", "0.685709", "0.6815819", "0.68036884", "0.6765353", "0.6762905", "0.6715452", "0.66673505", "0.66570497", "0.66543585", "0.66437966", "0.6637142", "0.6635336", "0.6630181", "0.66187555", "0.6617563", "0.6607038", "0.6564456", "0.65583116", "0.655349", "0.655349", "0.6530891", "0.6525757", "0.6510906", "0.65052146", "0.6499725", "0.6496138", "0.64846647", "0.64652914", "0.64578146", "0.6435176", "0.64200723", "0.6412583", "0.64001924", "0.63960344", "0.63867545", "0.6371995", "0.6360194", "0.63481665", "0.6346275", "0.634575", "0.6335423", "0.6334174", "0.6333553", "0.63141155", "0.631407", "0.63079673", "0.62923115", "0.62901646", "0.62774897", "0.6276368", "0.6273488", "0.62584674", "0.6257389", "0.625727", "0.6253154", "0.6241006", "0.62361133", "0.62319994", "0.62319994", "0.6217543", "0.62175405", "0.62091976", "0.6186547", "0.6184726", "0.617816", "0.617217", "0.61718076", "0.616675", "0.6164478", "0.61506945", "0.6144924", "0.61430526", "0.6142603", "0.6140775", "0.6124119", "0.61225903", "0.6117977", "0.6116005", "0.61146533", "0.61141133", "0.6102686", "0.6079977", "0.6072872", "0.60674906", "0.6065473", "0.6064346", "0.60643023" ]
0.74042195
1
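A minimal, self-contained sketch of the contour-to-bounding-box pattern the negatives above share (illustrative only; the file name and threshold value are assumptions, and OpenCV >= 4 is assumed so cv2.findContours returns two values):

import cv2

img = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
_, mask = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)  # binarize at an assumed threshold
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
    largest = max(contours, key=cv2.contourArea)  # keep the biggest blob
    x, y, w, h = cv2.boundingRect(largest)        # axis-aligned bounding box
    print(f"bbox: x={x}, y={y}, w={w}, h={h}")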
Calculates the centroid of the contour from its image moments. cv2.moments computes all of the moments up to the third order of a polygon or rasterized shape.
def __CalculateCentroid(self, contour):
    moments = cv2.moments(contour)
    centroid = (-1, -1)
    if moments["m00"] != 0:
        centroid = (int(round(moments["m10"] / moments["m00"])), int(round(moments["m01"] / moments["m00"])))
    return centroid
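A minimal usage sketch for the moments-based centroid in the document above (illustrative only; the square contour is a made-up example, and the m00 check mirrors the document's guard against degenerate contours):

import cv2
import numpy as np

contour = np.array([[[10, 10]], [[10, 50]], [[50, 50]], [[50, 10]]], dtype=np.int32)
moments = cv2.moments(contour)
if moments["m00"] != 0:
    cx = int(round(moments["m10"] / moments["m00"]))
    cy = int(round(moments["m01"] / moments["m00"]))
else:
    cx, cy = -1, -1  # degenerate contour: zero area, centroid undefined
print(cx, cy)  # prints 30 30 for this square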
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def centroid(self):\n return self.contours_to_matrix().mean(axis=0)", "def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid", "def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num", "def centroid(self): # -> BaseGeometry:\n ...", "def getContourCentroid(x, y, w, h):\n coordXCentroid = (x+x+w)/2\n coordYCentroid = (y+y+h)/2\n objectCentroid = (int(coordXCentroid),int(coordYCentroid))\n return objectCentroid", "def centroid(self) -> Point:\n points = self.normalized_array\n centroids = [np.average(points[[0, i, i + 1], :-1], axis=0) for i in range(1, points.shape[0] - 1)]\n weights = [det(self._normalized_projection()[[0, i, i + 1]]) / 2 for i in range(1, points.shape[0] - 1)]\n return Point(*np.average(centroids, weights=weights, axis=0))", "def centroid(self):\n x, y = self.coordinates\n A = 0.5 * sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, len(self)-1))\n cx = sum((x[i] + x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n cy = sum((y[i] + y[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n return Point((cx, cy), properties=self.properties, crs=self.crs)", "def getCentroid(self):\n if len(self.points) == 0:\n # None\n return None\n elif len(self.points) == 1:\n # Same point\n return self.points[0]\n elif len(self.points) == 2:\n # Middle of a segment\n return Segment(*self.points).middle\n elif len(self.points) == 3:\n # Intersection point of 2 medians\n return Point.average(self.points)\n else:\n # Geometric decomposition to compute centroids (wikipedia)\n n = len(self.points) # n is the number of points\n # There are n-2 forms\n forms = [Form([self.points[0]] + self.points[i:i + 2]) for i in range(1, n - 1)]\n # So n-2 centroids and areas, except if some of the points are one upon another, no area is null\n centroids = [form.center for form in forms]\n areas = [form.area for form in forms]\n # we compute the average centroid weighted by the areas\n weighted_centroid = Point.sum([a * c for (c, a) in zip(centroids, areas)])\n centroid = weighted_centroid / sum(areas)\n return centroid", "def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)", "def get_contour_centroid(contour):\n M = cv2.moments(contour)\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n return (cx, cy)", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! 
Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def compute_polygon_centroid_2d(polygon):\r\n return geometry.gmComputePolygonCentroid(polygon)", "def centroid(t, v):\n c = numpy.zeros(v[0].shape)\n total_area = 0\n for i in range(len(t)):\n p = vertices(t[i], v)\n ct = triangle.centroid(p)\n area = triangle.area(p)\n c += area * ct\n total_area += area\n c /= total_area\n return c", "def getCentroid(self):\n centroid = 0.0\n sumMagnitude = 0.0\n\n for i in range(0,self.nUniquePoints):\n freq,magnitude = self.fDomain[i]\n\n centroid += freq*magnitude\n sumMagnitude += magnitude\n \n centroid /= sumMagnitude\n return centroid", "def centroid(self):\n return _property_geo(arctern.ST_Centroid, self)", "def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid", "def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid", "def centroid(self) -> Point[Scalar]:\n return self._context.multipoint_centroid(self)", "def calculate_polygon_centroid(polygon):\n\n # Make sure it is numeric\n P = numpy.array(polygon)\n\n # Get area - needed to compute centroid\n A = calculate_polygon_area(P, signed=True)\n\n # Extract x and y coordinates\n x = P[:, 0]\n y = P[:, 1]\n\n # Exercise: Compute C as shown in http://paulbourke.net/geometry/polyarea\n a = x[:-1] * y[1:]\n b = y[:-1] * x[1:]\n\n cx = x[:-1] + x[1:]\n cy = y[:-1] + y[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. 
* A)\n\n # Create Nx2 array and return\n C = numpy.array([Cx, Cy])\n return C", "def centroid_for_uncomputed_shapes(shape_list: List[List[Tuple[float, float]]]) -> Tuple[float, float]:\n centroids = []\n areas = []\n for s in shape_list:\n centroids.append(convex_centroid(s))\n areas.append(convex_area(s))\n return centroid_for_shapes(centroids, areas)", "def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)", "def calc_centroid(self):\n sumX = 0.0\n sumY = 0.0\n dis = 0.0\n for p in self.points:\n sumX += p.x\n sumY += p.y\n d = p.distance(self.centroid)\n if dis < d: dis = d\n # radius is the longest distance within points\n self.radius = dis + 0.1\n size = len(self.points)\n if size:\n return Point(x=float(sumX)/size, y=float(sumY)/size)\n else:\n return self.centroid", "def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))", "def get_centroid(moments):\n if moments['m00'] > 0:\n centroid_x = moments['m10']/moments['m00']\n centroid_y = moments['m01']/moments['m00']\n else:\n centroid_x = 0.0\n centroid_y = 0.0\n return centroid_x, centroid_y", "def centroid(self) -> PointValue:\n return ops.GeoCentroid(self).to_expr()", "def calc_centroid(self, points):\n\t\tself.canvas.create_polygon(points)\n\t\tx = [i[0] for i in points] # all the math is wrong :(\n\t\ty = [j[1] for j in points]\n\n\t\tarea = x[0] * (y[0] - y[-1])\n\t\tx_hat = (x[0] ** 2) * (y[0] - y[-1]) / (2) \n\t\ty_hat = -(y[0] ** 2) * (x[0] - x[-1]) / (2)\n\n\t\tfor i in range(1, len(points) - 1):\n\t\t\tdt = length(x[i], y[i], x[i - 1], y[i - 1])\n\t\t\tdy = y[i] - y[i - 1]\n\t\t\tdx = x[i] - x[i - 1]\n\t\t\tarea += 2 * x[i] * dy\n\t\t\tx_hat += (x[i] ** 2) * dy\n\t\t\ty_hat -= (y[i] ** 2) * dx\n\n\t\tarea += x[-1] * (y[-1] - y[-2])\n\t\tx_hat += (x[-1] ** 2) * (y[-1] - y[-2]) / 2\n\t\ty_hat -= (y[-1] ** 2) * (x[-1] - x[-2]) / 2\n\t\tarea /= 2\n\t\tx_hat /=2\n\t\ty_hat /= 2\n\t\tprint(\"Area: %s\\nX: %s\\nY: %s\" % (area, x_hat/area, y_hat/area))\n\t\treturn x_hat/area, y_hat/area", "def centroid_of_rect(roi):\n return int(roi.shape[0] / 2), int(roi.shape[1] / 2)", "def test_polygon_centroids(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Create closed simple polygon (clock wise)\n # FIXME (Ole): Not sure whether to raise an exception or\n # to return absolute value in this case\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(168.5, -1.5)' % tuple(C))\n assert numpy.allclose(C, [168.5, -1.5]), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry 
= layer.get_geometry()\n\n P = geometry[0]\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid\n reference_centroid = [106.7036938, -6.134533855] # From qgis\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8)\n\n # Store centroid to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)\n\n # Another realistic polygon\n P = numpy.array([[106.7922547, -6.2297884],\n [106.7924589, -6.2298087],\n [106.7924538, -6.2299127],\n [106.7922547, -6.2298899],\n [106.7922547, -6.2297884]])\n\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid from qgis\n reference_centroid = [106.79235602697445, -6.229849764722536]\n msg = 'Got %s but expected %s' % (str(C), str(reference_centroid))\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8), msg\n\n # Store centroid to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)", "def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid", "def centroid(image, threshold=0, binarize=False):\n\n signal = np.where(image > threshold)\n sy, sx = image.shape[0], image.shape[1]\n\n temp = np.zeros((sy, sx))\n\n if binarize is True:\n temp[signal] = 1.0\n else:\n temp[signal] = image[signal]\n\n profx = 1.0 * temp.sum(axis=0)\n profy = 1.0 * temp.sum(axis=1)\n profx -= np.min(profx)\n profy -= np.min(profy)\n\n x0 = (profx * np.arange(sx)).sum() / profx.sum()\n y0 = (profy * np.arange(sy)).sum() / profy.sum()\n\n return (x0, y0)", "def CenterOfMassForShape(shape):\n polygons = SplitIntoPolygons(shape)\n total_A = 0\n total_cx = 0\n total_cy = 0\n\n for polygon in polygons:\n cx, cy, A = CenterOfMass(polygon)\n total_cx += A * cx\n total_cy += A * cy\n total_A += A\n\n return (total_cx / total_A, total_cy / total_A)", "def getCentroid(self) -> Vec3:\n return self.centroid()", "def centroid(arr):\n l = arr.shape[0]\n ixs = np.arange(l)\n arr = arr - np.median(arr)\n arr = np.where(arr < 0, 0, arr) \n ixs2 = ixs * ixs\n sumarr = arr.sum()\n cen = np.dot(arr, ixs)/sumarr\n return cen, math.sqrt(np.dot(arr, ixs2)/sumarr - cen * cen)", "def centroid_1D(image, xpeak, xhw, debug=False):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n c_sum = 0.0\n xcen = 0.0\n \n for ii in xrange(int(xpeak - xhw - 1), int(xpeak + xhw - 1)):\n c_sum = c_sum + vector[ii]\n xloc = ii + 1\n xcen += xloc * vector[ii]\n \n print('(centroid_1D): Sum = ', c_sum)\n \n \n if c_sum == 0:\n print('(centroid_1D): ERROR - divide by zero')\n else:\n xcen /= c_sum\n \n print('(centroid_1D): Centroid = ', xcen-1)\n \n # -1 on both axes, as Python is 0 major \n return xcen-1, c_sum", "def centroid(func, step=0.1):\n points = func.points(step)\n num, den = 0, 0\n\n for x, y in points:\n num += x * y\n den += y\n\n return num / den", "def centroid(self, method: str = 'median') -> Vector:\n\n\t\toriginal_origins = deepcopy(self.origin)\n\t\tself.origin_to_centroid(method=method)\n\t\tcentroid = deepcopy(self.origin)\n\t\tself.origin = 
original_origins\n\n\t\tif len(self._meshes) > 1:\n\t\t\treturn sum(centroid) / len(self._meshes)\n\n\t\treturn centroid", "def GetCentroid(self, p_float=..., p_float=..., p_float=...):\n ...", "def getCoordinateCentroid(coords):\n # Check we're getting numpy arrays\n assert(type(coords).__module__ == np.__name__)\n assert(coords.shape[1] == 2)\n coords = np.radians(coords)\n lat, lon = coords[:,0], coords[:,1]\n # compute location in 3D axis\n X = np.cos(lat) * np.cos(lon)\n Y = np.cos(lat) * np.sin(lon)\n Z = np.sin(lat)\n\n x, y, z = np.mean(X), np.mean(Y), np.mean(Z)\n centroid_lon = np.arctan2(y, x)\n hyp = np.sqrt(x*x + y*y)\n centroid_lat = np.arctan2(z, hyp)\n\n return np.degrees(centroid_lat), np.degrees(centroid_lon)", "def hull_centroid(points):\n dim = [np.unique(points[:, i]).size != 1 for i in range(3)]\n hull = ConvexHull(points[:, dim])\n centroid = points.mean(axis=0)\n centroid[dim] = hull.points[hull.vertices].mean(axis=0)\n\n return centroid", "def get_centroid(poly):\n # Make sure poly is formatted correctly\n if len(poly) < 3:\n raise ValueError('polygon has less than 3 points')\n for point in poly:\n if type(point) is not list or 2 != len(point):\n print(type(point))\n raise ValueError('point is not a list of length 2')\n # Calculate the centroid from the weighted average of the polygon's\n # constituent triangles\n area_total = 0\n centroid_total = [float(poly[0][0]), float(poly[0][1])]\n for i in range(0, len(poly) - 2):\n # Get points for triangle ABC\n a, b, c = poly[0], poly[i+1], poly[i+2]\n # Calculate the signed area of triangle ABC\n area = ((a[0] * (b[1] - c[1])) +\n (b[0] * (c[1] - a[1])) +\n (c[0] * (a[1] - b[1]))) / 2.0\n # If the area is zero, the triangle's line segments are\n # colinear so we should skip it\n if 0 == area:\n continue\n # The centroid of the triangle ABC is the average of its three\n # vertices\n centroid = [(a[0] + b[0] + c[0]) / 3.0, (a[1] + b[1] + c[1]) / 3.0]\n # Add triangle ABC's area and centroid to the weighted average\n centroid_total[0] = ((area_total * centroid_total[0]) +\n (area * centroid[0])) / (area_total + area)\n centroid_total[1] = ((area_total * centroid_total[1]) +\n (area * centroid[1])) / (area_total + area)\n area_total += area\n return centroid_total", "def get_centroids(self, dim):\n cdef np.ndarray[float64, mode='c', ndim=2] out\n\n if dim == 0:\n return self.coors\n\n else:\n out = np.empty((self.mesh.topology.num[dim], self.dim),\n dtype=np.float64)\n mesh_get_centroids(self.mesh, &out[0, 0], dim)\n\n return out", "def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])", "def compute_centroid(data):\n return sum(data[:]) / len(data)", "def center_of_coor(coordinates):\n return (np.sum(coordinates, axis=0) / coordinates.shape[0])", "def get_centroid(M):\t\n\treturn int(M['m10']/M['m00']), int(M['m01']/M['m00'])", "def centroid(self, coords):\r\n return np.mean(coords, axis=0)", "def getCentroid(self):\r\n return self._centroid", "def ComputeCentroid(self, vtkPoints, int_tuple, p_float=..., p_float=..., p_float=...):\n ...", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == 
\"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def center_of_mass_polygon(polygon):\n L = 0\n cx = 0\n cy = 0\n cz = 0\n p = len(polygon)\n for i in range(-1, p - 1):\n p1 = polygon[i]\n p2 = polygon[i + 1]\n d = distance_point_point(p1, p2)\n cx += 0.5 * d * (p1[0] + p2[0])\n cy += 0.5 * d * (p1[1] + p2[1])\n cz += 0.5 * d * (p1[2] + p2[2])\n L += d\n cx = cx / L\n cy = cy / L\n cz = cz / L\n return cx, cy, cz", "def centroid_of_points(pts):\n xs, ys, zs = 0, 0, 0\n for pt in pts:\n xs += pt[0]\n ys += pt[1]\n if len(pt) > 2:\n zs += pt[2]\n if len(pts) > 0:\n xs /= len(pts)\n ys /= len(pts)\n if len(pts[0]) > 2:\n zs /= len(pts)\n return xs, ys, zs\n return xs, ys", "def geojson_centroid(obj):\n points = coords(obj)\n return best_effort_centroid2d(points)", "def get_centroid(points):\n\n xs, ys = points[:, 0], points[:, 1]\n\n a = xs[:-1] * ys[1:]\n b = ys[:-1] * xs[1:]\n\n A = numpy.sum(a - b) / 2.\n\n cx = xs[:-1] + xs[1:]\n cy = ys[:-1] + ys[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. 
* A)\n\n return Cx, Cy", "def centroids(img):\n _, _, _, centr = cv2.connectedComponentsWithStats(img)\n return centr[1:]", "def centroids(t, v):\n c = numpy.zeros((len(t), 3))\n for i in range(len(t)):\n p = vertices(t[i], v)\n c[i] = triangle.centroid(p)\n return c", "def _find_coord_centre(self,\n shape_coords) -> np.array:\n return shape_coords.mean(axis=0)", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def get_centroid(image, method=\"propio\"):\n # ---------Método Propio (directo de la definición)----------\n if method == \"propio\":\n # Dimensiones\n height, width = image.shape[:2]\n # Masa total\n total_mass = image.sum()\n\n # Si la masa total es cero, entonces el centro de masa \n # no existe\n if total_mass == 0:\n r = np.array([-1, -1])\n return r, None\n\n # Primera componente (suma por filas)\n row_sum = image.sum(axis=1)\n row_weight = np.arange(1, height+1)\n r_i = np.dot(row_sum, row_weight)\n r_i /= total_mass\n r_i = int(r_i)\n \n # Segunda componente (suma por columnas)\n column_sum = image.sum(axis=0)\n column_weight = np.arange(1, width+1)\n r_j = np.dot(column_sum, column_weight)\n r_j /= total_mass\n r_j = int(r_j)\n\n # Retorna el centroide en coordenadas de imagen\n r = np.array([r_j, r_i])\n return r, None\n\n # ---------Método con contornos-----------------\n else:\n # Obtener contorno imagen binaria (máscara)\n cnts = get_contours(image)\n \n # Para cada contorno, obtener el centroide y añadirlo a lista\n r = []\n for c in cnts:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n r.append(np.array([cX, cY]))\n\n # Ahora se retorna una lista con centroides (según la \n # cantidad de contornos que se hayan encontrado)\n if len(r) == 0:\n r.append(np.array([-1, -1]))\n return r, cnts\n else:\n return r, cnts", "def centroid_for_shapes(centroids: List[Tuple[float, float]],\n areas: List[float] = None) -> Tuple[float, float]:\n gc = np.zeros(2)\n area = 0\n if areas is None:\n areas = np.ones(len(centroids))\n for pc, a in zip(centroids, areas):\n gc += np.array(pc)*a\n area += a\n gc /= area\n return np.array(gc)", "def get_element_centroids(self):\n if self.centroids is None:\n self.centroids = np.vstack((\n np.mean(self.grid['x'], axis=1),\n np.mean(self.grid['z'], axis=1)\n )).T\n\n return self.centroids", "def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide", "def center_of_contour(contorno):\n M = cv2.moments(contorno)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"]!=0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (int(cX), int(cY))\n else:\n return (200,150)", "def centroid(self, unit='spatial'):\n com = ndimage.center_of_mass(self.data)\n if unit != 'spatial':\n return com\n else:\n # tuple - cast from generator\n # sample spacing - 
indices to units\n # x-c -- index shifted from center\n return tuple(self.sample_spacing * (x-c) for x, c in zip(com, (self.center_y, self.center_x)))", "def get_centroid_3d(v):\n\n if isinstance(v, dict):\n centroids = {}\n for n, s in v.iteritems():\n if isinstance(s, tuple): # volume, origin_or_bbox\n vol, origin_or_bbox = s\n if len(origin_or_bbox) == 3:\n origin = origin_or_bbox\n elif len(origin_or_bbox) == 6:\n bbox = origin_or_bbox\n origin = bbox[[0,2,4]]\n else:\n raise\n centroids[n] = np.mean(np.where(vol), axis=1)[[1,0,2]] + origin\n else: # volume\n centroids[n] = np.mean(np.where(s), axis=1)[[1,0,2]]\n return centroids\n else:\n return np.mean(np.where(v), axis=1)[[1,0,2]]", "def find_centroid_for_each(self):", "def coordinates(self):\n # TODO: Add the feature where coordinates come from multiple sources.\n # Consider whether or not you'd want to output the categorical\n # variable indicating the source of the coordinate data or\n # make the user place coordinates a different property entirely.\n try:\n bounding_box = array(\n self.status.place\n [\"bounding_box\"]\n [\"coordinates\"]\n ).squeeze()\n centroid = bounding_box.mean(axis=0)\n return centroid\n except AttributeError:\n return zeros(2)", "def spectral_centroid(data, fft_data):\n freq = np.fft.fftfreq(len(data))[:len(fft_data)]\n freq[-1] += 1 # last element needs to be 0.5 not -0.5\n spec = np.abs(fft_data)\n spec_sum = np.sum(spec)\n if spec_sum == 0:\n return 0.25\n centroid = np.sum(spec*freq)/spec_sum\n return centroid", "def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))", "def Centroid(self):\n return Vector(self.centroid)", "def find_center_mass(contour):\n M = cv2.moments(contour)\n if M[\"m00\"] == 0:\n (x, y), _ = cv2.minEnclosingCircle(contour)\n cR = int(y)\n cC = int(x)\n # raise ValueError(\"Contour too small to find a new center.\")\n else:\n cR = int(M[\"m01\"] / M[\"m00\"])\n cC = int(M[\"m10\"] / M[\"m00\"])\n return (cR, cC)", "def find_center( contours ):\r\n ret = []\r\n\r\n for x in contours:\r\n M = cv2.moments( x )\r\n pt = Point()\r\n pt.x = int( M['m10']/M['m00'] )\r\n pt.y = int( M['m01']/M['m00'] )\r\n\r\n ret.append( pt )\r\n\r\n return( ret );", "def element_centroid(self, element):\n return centroid_points(self.nodes_xyz(nodes=self.elements[element].nodes))", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim = len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def centroids(network,\n geometry,\n **kwargs):\n Np = geometry.num_pores()\n value = _sp.zeros(Np)\n pore_map = geometry.map_pores(geometry.pores(),geometry._net)\n for geom_pore,net_pore in pore_map:\n net_throats = geometry._net.find_neighbor_throats(net_pore)\n geom_throats = geometry._net.map_throats(net_throats,geometry)[:,1]\n tcs = geometry[\"throat.centroid\"][geom_throats]\n pc = geometry[\"pore.centroid\"][geom_pore]\n value[geom_pore]=_sp.mean(_sp.sqrt(((tcs-pc)*(tcs-pc))[:,0]+((tcs-pc)*(tcs-pc))[:,1]+((tcs-pc)*(tcs-pc))[:,2]))*2\n return value", "def find_centroid(event_file):\n \n print('Finding the centroid 
of the event file...\\n')\n \n make_img(event_file,clobber=True)\n \n fits = pyfits.open('temp.fits')\n \n #Previously used the RA and DEC headers to find the centre, now trying a more nuanced\n #max pixel value method\n \n #source_ra = fits[1].header['RA_TARG']\n #source_dec = fits[1].header['DEC_TARG']\n \n #return source_ra,source_dec\n \n data = fits[0].data\n \n #As the data from make_img is 1024x1024 based on the centre of the image, use modulo\n #arithmetic to find the physical x and y coordinates\n \n argmax = np.argmax(data)\n \n x = argmax%1024 + 3584\n y = int(argmax/1024) + 3584\n \n return x,y", "def Light_Spot_Centroid(self,Amp,x,y,Amp_flag=True):\r\n \r\n if Amp_flag:\r\n I = (Amp*np.conjugate(Amp)).real\r\n else:\r\n I = Amp\r\n dx = x[0,1]-x[0,0]\r\n Nominator_x = self.double_trapz(I*x,dx=dx,dy=dx)\r\n Nominator_y = self.double_trapz(I*y,dx=dx,dy=dx)\r\n Denominator = self.double_trapz(I,dx=dx,dy=dx)\r\n \r\n x_c = Nominator_x/Denominator\r\n y_c = Nominator_y/Denominator\r\n \r\n return x_c,y_c", "def get_molecule_centroid(molecule_xyz):\n return np.mean(molecule_xyz, axis=0)", "def CenterOfMass(points):\n A = AreaOfPolygon(points)\n N = len(points)\n cx = 0\n cy = 0\n for i in xrange(0, N):\n x_i = points[i][0]\n y_i = points[i][1]\n x_ip1 = points[(i+1) % N][0]\n y_ip1 = points[(i+1) % N][1]\n part = (x_i * y_ip1 - x_ip1 * y_i)\n cx += ((x_i + x_ip1) * part)\n cy += ((y_i + y_ip1) * part)\n return (cx/(6*A), cy/(6*A), abs(A))", "def getCentroid(cluster):\n try:\n return np.mean(cluster, axis = 0)\n except:\n return None", "def getBeliefsCentroid(self, idx):\n x = 0.0\n y = 0.0\n total = 0.0\n for p in self.beliefs[idx]:\n x += p[0]\n y += p[1]\n total += 1.0\n return (round(x / total), round(y / total))", "def findCentroid(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n \n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomCentroid = geomProj.Centroid()\n geomCentroid.Transform(llTr)\n \n centroidDict = eval(geomCentroid.ExportToJson())\n centroidXY = centroidDict['coordinates']\n return centroidXY", "def get_circles_centers(triangles):\n points1, points2, points3 = (triangles[:, 0],\n triangles[:, 1],\n triangles[:, 2])\n # Vectors\n sides1 = points2 - points1\n sides2 = points3 - points1\n # Length of vector of cross product * 2\n area = 2 * (sides1[:, 0] * sides2[:, 1] - sides1[:, 1] * sides2[:, 0])\n\n # (y_2(x_1^2 + y_1^2) - y_1(x_2^2 + y_2^2)) / area + x\n centers_x = ((sides2[:, 1] *\n (np.square(sides1[:, 0]) + np.square(sides1[:, 1])) -\n sides1[:, 1] *\n (np.square(sides2[:, 0]) + np.square(sides2[:, 1]))) /\n area + points1[:, 0])\n centers_y = ((sides1[:, 0] *\n (np.square(sides2[:, 0]) + np.square(sides2[:, 1])) -\n sides2[:, 0] *\n (np.square(sides1[:, 0]) + np.square(sides1[:, 1]))) /\n area + points1[:, 1])\n\n # Transportated.\n return np.array((centers_x, centers_y)).T", "def centre_of_points(list_of_points):\n\n cp = np.average(list_of_points, axis=0)\n return cp", "def findCentroid(pivlist):\n centroid = [0.0, 0.0, 0.0]\n for p in pivlist:\n centroid[0] += p[0]\n centroid[1] += p[1]\n centroid[2] += p[2]\n\n centroid[0] /= len(pivlist)\n centroid[1] /= len(pivlist)\n centroid[2] /= len(pivlist)\n return centroid", "def center_directions(contours, image: ndarray):\n return contours_.contour_average_center(contours)", "def get_centre(self):\n # just get the centroid\n # perhaps try something like:\n # https://github.com/mapbox/polylabel/blob/master/polylabel.js\n # in the future\n coords = np.array([(n.x, 
n.y) for n in self.nodes])\n centre_x = coords[:, 0].mean()\n centre_y = coords[:, 1].mean()\n return centre_x, centre_y", "def get_region_centroid(mask, region):\n coords = np.column_stack(np.where(mask == region))\n coords = np.apply_along_axis(np.mean, 0, coords).round()\n coords = np.uint8(coords)\n return(coords)", "def mcentroid(xarr, yarr, kern=default_kernal, xc=None, xdiff=None):\n\n if xdiff is None:\n xdiff = len(kern)\n\n if xdiff < len(kern):\n xdiff = len(kern)\n\n\n if xc is not None and xdiff:\n mask = (abs(xarr - xc) < xdiff)\n else:\n mask = np.ones(len(xarr), dtype=bool)\n\n # convle the input array with the default kernal\n warr = np.convolve(yarr[mask], kern, mode='same')\n\n # interpolate the results\n # imask is used to make sure we are only gettin the\n # center pixels\n imask = (abs(xarr[mask]-xarr[mask].mean()) < 3)\n cx = np.interp(0, warr[imask], xarr[mask][imask])\n return cx", "def find_centroids(self, img, n=1):\n # Find contours\n contours = cv2.findContours(np.uint8(img), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Get centers and areas\n centers = []\n areas = []\n for c in contours:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / max(M[\"m00\"],1))\n cY = int(M[\"m01\"] / max(M[\"m00\"],1))\n centers.append([cX,cY])\n areas.append(cv2.contourArea(c))\n \n # Make sure we have enough contours\n detected=True\n while len(areas)<n:\n centers.append(None)\n areas.append(0)\n detected=False\n \n # Find top n sorted contours\n sorted_centers = []\n for i in np.argsort(-1*np.array(areas))[:n]:\n sorted_centers.append(centers[i])\n \n return np.array(sorted_centers), detected", "def test_centroids_mask():\n data = np.ones((2, 2)).astype(np.float)\n mask = [[False, False], [True, True]]\n centroid = centroid_com(data, mask=None)\n centroid_mask = centroid_com(data, mask=mask)\n assert_allclose([0.5, 0.5], centroid, rtol=0, atol=1.e-6)\n assert_allclose([0.5, 0.0], centroid_mask, rtol=0, atol=1.e-6)", "def get_object_centroid(labelmask, id):\n # Get coordinates \n coords = np.where(labelmask == id)\n # Find mean of each coordinate, remove negatives, make int.\n return tuple([int(np.mean(x)) for x in coords])", "def get_object_centroid(labelmask, id):\n # Get coordinates \n coords = np.where(labelmask == id)\n # Find mean of each coordinate, remove negatives, make int.\n return tuple([int(np.mean(x)) for x in coords])", "def centroid(self) -> Point:\n # if the hydroline is defined, use the centroid of the hydroline\n if isinstance(self.geometry, Polyline):\n pt = Geometry({\n 'x': np.mean([self.putin.geometry.x, self.takeout.geometry.x]),\n 'y': np.mean([self.putin.geometry.y, self.takeout.geometry.y]),\n 'spatialReference': self.putin.geometry.spatial_reference\n })\n\n # if both accesses are defined, use the mean of the accesses\n elif isinstance(self.putin, ReachPoint) and isinstance(self.takeout, ReachPoint):\n\n # create a point geometry using the average coordinates\n pt = Geometry({\n 'x': np.mean([self.putin.geometry.x, self.takeout.geometry.x]),\n 'y': np.mean([self.putin.geometry.y, self.takeout.geometry.y]),\n 'spatialReference': self.putin.geometry.spatial_reference\n })\n\n # if only the putin is defined, use that\n elif isinstance(self.putin, ReachPoint):\n pt = self.putin.geometry\n\n # and if on the takeout is defined, likely the person digitizing was taking too many hits from the bong\n elif isinstance(self.takeout, ReachPoint):\n pt = self.takeout.geometry\n\n else:\n pt = None\n\n return pt", "def 
centroid(clusters):\n centroids = list(map(\n lambda cluster: tuple(map(\n lambda x, cluster=cluster: x / len(cluster['vertices']),\n sum(map(\n numpy.array,\n cluster['vertices'])))),\n clusters))\n return centroids", "def centroid(self, region_list):\n centroid_list = [] # a list of [(distance from robot, centroid)]\n robot = map_helper.map_to_world(self.start[0], self.start[1], self.resolution, self.x_offset, self.y_offset)\n\t#rospy.loginfo(region_list)\n for region in region_list:\n n = len(region)\n i = math.trunc(n/2)\n centroid = region[i]\n\n x = abs(centroid[0] - robot[0])\n y = abs(centroid[1] - robot[1])\n dist = math.hypot(x, y)\n centroid_list.append((dist, centroid))\n return self.smallest_centroid(centroid_list)", "def compute_voronoi_centroid_volume(vertices):\n from scipy.spatial import Delaunay, ConvexHull\n\n tess = Delaunay(vertices)\n dimension = np.shape(vertices)[1]\n\n w = np.zeros((tess.nsimplex, 1))\n cent = np.zeros((tess.nsimplex, dimension))\n for i in range(tess.nsimplex):\n # pylint: disable=E1136\n ch = ConvexHull(tess.points[tess.simplices[i]])\n w[i] = ch.volume\n cent[i, :] = np.mean(tess.points[tess.simplices[i]], axis=0)\n\n volume = np.sum(w)\n centroid = np.matmul(np.divide(w, volume).T, cent)\n\n return centroid, volume", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)" ]
[ "0.7549272", "0.7480494", "0.74477065", "0.72905296", "0.72855806", "0.7277071", "0.72547257", "0.7159602", "0.71536976", "0.70305306", "0.7029446", "0.7029446", "0.7026189", "0.6984927", "0.69735426", "0.6927554", "0.69200563", "0.69048506", "0.69047856", "0.6894379", "0.68849826", "0.68483096", "0.67892766", "0.6761048", "0.67595977", "0.6730462", "0.67082334", "0.6707628", "0.6683429", "0.6682657", "0.66605735", "0.6650885", "0.66328174", "0.6631892", "0.6604373", "0.65820533", "0.6573958", "0.6529919", "0.64982986", "0.64962673", "0.6478394", "0.64725274", "0.64673185", "0.64656216", "0.6426678", "0.64148474", "0.63952553", "0.63934183", "0.6381811", "0.63564676", "0.63093114", "0.630514", "0.6302313", "0.6300358", "0.63000417", "0.62951046", "0.6282139", "0.62809545", "0.6275831", "0.62710625", "0.62322086", "0.62178653", "0.6216586", "0.6211527", "0.61534166", "0.61404055", "0.61393076", "0.61358637", "0.6124322", "0.61169684", "0.6112454", "0.61096936", "0.6094065", "0.60915416", "0.60497355", "0.6045493", "0.6041668", "0.60384095", "0.6031207", "0.6022439", "0.6019365", "0.6012734", "0.6011377", "0.6007254", "0.59954447", "0.59945166", "0.5990955", "0.5967702", "0.59600127", "0.5952361", "0.5951998", "0.5937915", "0.5931731", "0.5931731", "0.5922544", "0.5920475", "0.5898544", "0.5893791", "0.5889329", "0.5869899" ]
0.80175775
0
Calculate the circumcircle (minimum enclosing circle) of an object using the function cv2.minEnclosingCircle(). It is the smallest circle that completely covers the object, i.e. the enclosing circle with minimum area.
def __CalculateCircle(self, contour):
    return cv2.minEnclosingCircle(contour)
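A minimal usage sketch for the cv2.minEnclosingCircle call in the document above (illustrative only; the point set is a made-up example):

import cv2
import numpy as np

points = np.array([[[0, 0]], [[4, 0]], [[2, 3]]], dtype=np.int32)
(cx, cy), radius = cv2.minEnclosingCircle(points)  # smallest circle covering every point
print((cx, cy), radius)  # center and radius are returned as floats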
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)", "def find_min_circle(contours):\n center = (0, 0)\n radius = 0\n\n if len(contours) > 0:\n #compute the minimum enclosing circle and centroid\n c = max(contours, key=cv2.contourArea)\n (x, y), radius = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n else:\n #ball not found\n center = None\n radius = None\n return center, radius", "def getCircleCircumscribed(self):\n p1, p2, p3 = self.points\n a1 = - (p2.x - p1.x) / (p2.y - p1.y)\n b1 = (p2.x ** 2 - p1.x ** 2 + p2.y ** 2 - p1.y ** 2) / (2 * (p2.y - p1.y))\n a2 = - (p3.x - p2.x) / (p3.y - p2.y)\n b2 = (p3.x ** 2 - p2.x ** 2 + p3.y ** 2 - p2.y ** 2) / (2 * (p3.y - p2.y))\n x = (b1 - b2) / (a2 - a1)\n y = a1 * x + b1\n radius = math.hypot(p1.x - x, p1.y - y)\n return Circle(x, y, radius=radius)", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def fit_central_circle(image, radius_lower_bound=170, radius_upper_bound=190):\n\n smoothed = smooth_gaussian(image.astype(np.float), sigma=5)\n edges = find_edges_sobel(smoothed)\n thresh = threshold_otsu(edges)\n\n hmm = 170, 190\n hough_radii = np.arange(140, 170, 2)\n hough_res = hough_circle(thresh, hough_radii)\n\n circles = find_n_best_hough_circles(hough_radii, hough_res, 1)\n circle = circles[0]\n\n return circle", "def houghCircle(img: np.ndarray, min_radius: float, max_radius: float) -> list:\r\n\r\n canny_cv, canny_my = edgeDetectionCanny(img, 200, 100)\r\n edges = []\r\n\r\n for x in range(canny_cv.shape[0]):\r\n for y in range(canny_cv.shape[1]):\r\n if canny_cv[x, y] == 255:\r\n edges.append((x, y))\r\n\r\n thresh = 0.47 # at least 47% of the pixels of a circle must be detected\r\n steps = 100 # number of samples from each circle\r\n\r\n points = []\r\n for r in range(min_radius, max_radius + 1):\r\n for t in range(steps):\r\n alpha = 2 * pi * t / steps\r\n x = int(r * cos(alpha))\r\n y = int(r * sin(alpha))\r\n points.append((x, y, r))\r\n\r\n temp_circles = {} # dict{circle center, radius: counter}\r\n for x, y in edges: # iterate the pixels of the edges:\r\n for dx, dy, r in points:\r\n b = x - dx\r\n a = y - dy\r\n count = temp_circles.get((a, b, r))\r\n if count is None:\r\n count = 0\r\n temp_circles[(a, b, r)] = count + 1\r\n\r\n # now add the appropriate circles to the ans list:\r\n circles = []\r\n sorted_temp = sorted(temp_circles.items(), key=lambda i: -i[1])\r\n for circle, counter in sorted_temp:\r\n x, y, r = circle\r\n # once a circle has been selected, we reject all the circles whose center is inside that circle\r\n if counter / steps >= thresh and all((x - xc) ** 2 + (y - yc) ** 2 > rc ** 2 for xc, yc, rc in circles):\r\n circles.append((x, y, r))\r\n\r\n return circles", "def find_largest_enclosing_circle(img):\n if img.dtype is not np.dtype(np.uint8):\n raise ValueError('The input image data type should be uint8.')\n\n # Calculate histogram.\n hist = cv.calcHist([img], [0], None, [256], [0, 256])\n\n # Find the min and max intensity value on the image.\n min_i, max_i = find_histogram_range(hist)\n\n 
# Threshold the image at the median intensity.\n _, binary_img = cv.threshold(img, (max_i + min_i) / 2, 255, cv.THRESH_BINARY)\n\n # Find contours.\n contours, _ = cv.findContours(binary_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_TC89_L1)\n if len(contours) == 0:\n return (0, 0), 0\n\n # Find a minimum enclosing circle for each contour, and find the largest one.\n circles = [cv.minEnclosingCircle(contour) for contour in contours]\n max_circle = max(circles, key=lambda circle: circle[1])\n (center_x, center_y), radius = max_circle\n return (int(center_x), int(center_y)), int(radius)", "def cropCircleROI(image, additionalCut = 5):\n Rmin = np.min(image.shape[:-1])/3\n Rmin = 1250 / 3040 * image.shape[0]\n Rmax = 1400 / 3040 * image.shape[0]\n\n #downscale image for better performance\n reduceFactor = 5 # squared\n hough_radii = np.arange(Rmin/reduceFactor, Rmax/reduceFactor, dtype = int)\n\n downSampledImage = block_reduce(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), block_size = (reduceFactor, reduceFactor), func = np.max)\n downSampledEdges = canny(downSampledImage, sigma=3, low_threshold=5, high_threshold=10)\n\n hough_res = hough_circle(downSampledEdges, hough_radii)\n downSampledCircle = np.unravel_index(np.argmax(hough_res, axis=None), hough_res.shape)\n circle = np.array([downSampledCircle[1], downSampledCircle[2], hough_radii[downSampledCircle[0]]])*reduceFactor\n\n circleMask_ = cv2.circle(np.ones(image.shape[:-1],dtype = \"uint8\"), (circle[1], circle[0]), circle[2]-additionalCut, 0, thickness = -1)\n\n return [np.ma.array(image[:,:,i], mask = circleMask_) for i in range (image.shape[2])]", "def circle_circumference(a):\n return (2*a*math.pi)", "def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)", "def center_on_box(img, radius, min_ref, xmin, xmax, ymin, ymax, na_val=-9999):\n x, y = num.meshgrid(num.arange(-radius, radius), num.arange(-radius, radius))\n coords = [(i, j) for i, j in zip(x.flatten(), y.flatten()) if (i ** 2 + j ** 2) ** 0.5 <= radius]\n fit = [num.mean(img[(xmin + i):(xmax + i), (ymin + j):(ymax + j)]) for i, j in coords]\n if num.nanmin(fit) <= min_ref:\n return num.array(coords[num.nanargmin(fit)])\n else:\n return num.array([na_val, na_val])", "def circle_contractivity_radius(self,acc=1.e-13,rmax=1000):\n from nodepy.utils import bisect\n\n tol=1.e-14\n r=bisect(0,rmax,acc,tol,self.__num__()._is_circle_contractive)\n return r", "def object_circularity(labelmask, label):\n # Find z slice with most pixels from object.\n z, i, j = np.where(labelmask == label)\n zmax = mode(z)[0][0]\n # Select 2D image representing object's max Z-slice.\n im = np.where(labelmask[zmax] == label, 1, 0)\n # Calculate circularity from object perimeter and area.\n regions = regionprops(im)\n perimeter = regions[0].perimeter\n area = regions[0].area\n if (perimeter == 0):\n perimeter = 0.5\n circularity = 4 * np.pi * area / (perimeter ** 2) \n return circularity", "def circumcenter(self) -> Point:\n e1, e2, e3 = self.edges\n bisector1 = e1._line.perpendicular(e1.midpoint, plane=self._plane)\n bisector2 = e2._line.perpendicular(e2.midpoint, plane=self._plane)\n return bisector1.meet(bisector2)", "def houghCircles(img, 
minDist=20, param1=50, param2=30, minRadius=0, maxRadius=0):\n\ttmp = grayscale(img)\n\ttmp = cv2.medianBlur(tmp, 5)\n\tcimg = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)\n\tcircles = cv2.HoughCircles(tmp, cv2.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2, minRadius=minRadius, maxRadius=maxRadius)\n\tif circles is None:\n\t\tprint \"No circles found, please adjust params...\\n\"\n\t\treturn None\n\tcircles = np.uint16(np.around(circles))\n\treturn circles", "def circumradius(T,binary_mask):\n (x1, y1), (x2, y2), (x3, y3) = T # extracting the points. \n \n D=2*(x1*(y2-y3)+x2*(y3-y1)+x3*(y1-y2)) # Diameter\n if D!=0:\n #Centroid of the cicumcircle\n Ux=(((x1**2+y1**2)*(y2-y3))+((x2**2+y2**2)*(y3-y1))+((x3**2+y3**2)*(y1-y2)))/D\n Uy=(((x1**2+y1**2)*(x3-x2))+((x2**2+y2**2)*(x1-x3))+((x3**2+y3**2)*(x2-x1)))/D\n \n #radius\n r = sqrt((Ux-x2)**2+(Uy-y2)**2)\n r=r+1\n \n #Determining the sign: it is positive if the centroid of the circumcricle is in the foreground\n x=np.floor(Ux).astype(int)\n y=np.floor(Uy).astype(int)\n\n if (x >=binary_mask.shape[0] or y >=binary_mask.shape[1]):\n r=-r\n elif (x<0 or y<0):\n r=-r\n elif binary_mask[x,y]:\n r=r\n else:\n r=-r\n return r\n else:\n return False", "def getContourCentroid(x, y, w, h):\n coordXCentroid = (x+x+w)/2\n coordYCentroid = (y+y+h)/2\n objectCentroid = (int(coordXCentroid),int(coordYCentroid))\n return objectCentroid", "def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()", "def find_inner_circle_parameters(plane_array, rmin=200, rmax=250):\n\n xdim, ydim = plane_array.shape\n\n edges = find_edges_sobel(plane_array)\n\n hough_radii = np.arange(rmin, rmax, 3)\n hough_res = hough_circle(edges, hough_radii)\n\n # Find the two clearest circles\n c1, c2 = find_n_best_hough_circles(hough_radii, hough_res, 2)\n\n # Work out which is the inner circle\n r1 = c1[2]\n r2 = c2[2]\n if r1 > r2:\n inner_circle_radius = r2\n cx, cy, r = c2\n else:\n inner_circle_radius = r1\n cx, cy, r = c1\n\n return cx, cy, r", "def objects_radius(self, centre, radius):", "def fit_circle(x,y,center_estimate=(0,0)):\r\n def calc_R(center):\r\n \"\"\"\r\n Calculate the distance of each 2D point from the center (xc, yc) \r\n \"\"\"\r\n xc = center[0]\r\n yc = center[1]\r\n return np.sqrt((x-xc)**2 + (y-yc)**2)\r\n \r\n def f_2(center):\r\n \"\"\"\r\n Calculate the algebraic distance between the data points and the mean\r\n circle centered at (xc, yc)\r\n \"\"\"\r\n Ri = calc_R(center)\r\n return Ri - Ri.mean()\r\n\r\n center, ier = optimize.leastsq(f_2,center_estimate)\r\n \r\n Ri = calc_R(center)\r\n R = np.mean(Ri)\r\n residue = sum((Ri - R)**2)\r\n return R, center, residue", "def circle_radius(a, b, c):\n # the sides cannot be negative\n if a < 0 or b < 0 or c < 0:\n return None\n else:\n # semi-perimeter of the circle\n p = (a + b + c) / 2\n\n # area of the traingle\n area = sqrt(p * (p - a) *\n (p - b) * (p - c))\n # Radius of the incircle\n radius = area / p\n # Return the radius\n return radius", "def area_of_circle(radius):\n return radius", "def incircle(self, a, b, c):\n m11, m12 = a.x - self.x, a.y - self.y\n m13 = m11 * m11 + m12 * m12\n m21, m22 = b.x - self.x, b.y - self.y\n m23 = m21 * m21 + m22 * m22\n m31, m32 = c.x - self.x, c.y - self.y\n m33 = m31 * m31 + m32 * m32\n det1 = m11 * (m22 * m33 - m23 * m32)\n det2 = m12 * (m21 * m33 - m23 * m31)\n det3 = m13 * (m21 * m32 - m22 * m31)\n return near(det1 - det2 + det3, 0)", "def find_center_mass(contour):\n M = cv2.moments(contour)\n if 
M[\"m00\"] == 0:\n (x, y), _ = cv2.minEnclosingCircle(contour)\n cR = int(y)\n cC = int(x)\n # raise ValueError(\"Contour too small to find a new center.\")\n else:\n cR = int(M[\"m01\"] / M[\"m00\"])\n cC = int(M[\"m10\"] / M[\"m00\"])\n return (cR, cC)", "def circumference(self):\n return math.pi * self.radius * 2", "def circle_area(circle):\n return pi * circle.radius * circle.radius", "def circumference(self):\n return self.width + self.height", "def circleArea(radius):\n return math.pi * radius * radius", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def pos_on_semicircle(x, r, cxy):\n pos = np.sqrt(r ** 2 - (x - cxy[0]) ** 2) + cxy[1]\n\n return pos", "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def create_circle_mask(img, center, radius):\n h, w = img.shape\n # Flipping center ordering here to account for differences in how opencv returns\n # coordinates (standard x,y) and how numpy wants them (r,c)\n act_center = np.array(center[::-1])\n r, c = np.ogrid[0:h, 0:w] - np.array(center[::-1])\n return (r ** 2 + c ** 2) < radius ** 2", "def filled_circle(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0\n\tdef func(i0, i1):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\trr2 = ii0**2 + ii1**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def calc_isoconc_point(r_min, r_max):\n I = np.sqrt((r_min**2.0 + r_min * r_max + r_max**2.0)/3.0)\n if not isinstance(I, float):\n msg = 'isoconcentration point calc error: {} is not a float.'\n raise TypeError, msg.format(I)\n return I", "def area_circle(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"area_circle() only accepts non-negative values\")\r\n return pi * radius**2", "def Intarea( xc, yc, r, x0, x1, y0, y1):\n\n#\n# Shift the objects so that the circle is at the origin.\n#\n x0 = x0 - xc\n y0 = y0 - yc\n x1 = x1 - xc\n y1 = y1 - yc\n\n return Oneside( x1, y0, y1, r ) + Oneside( y1, -x1, -x0, r ) +\\\n Oneside( -x0, -y1, -y0, r ) + Oneside( -y0, x0, x1, r )", "def circumcenter(C):\n ri, rj, rk = C.transpose(1,2,0)\n ax, ay = ri\n bx, by = rj\n cx, cy = rk\n d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))\n ux = ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (\n ay - by)) / d\n uy = ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (\n bx - ax)) / d\n vs = np.empty((ax.size,2),dtype=np.float64)\n vs[:,0],vs[:,1] = ux,uy\n return vs", "def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)", "def get_circ(event,x,y,flags,param):\t\n global frame,frameCenter#,diameter\n click = 0\n if event == cv2.EVENT_LBUTTONDOWN:\n #draw circles where user clicks\n cv2.circle(frame,(x,y),1,(0,0,0),-1)\n cv2.imshow('Click 6 points on circle', frame)\n diameter.append(np.sqrt((frameCenter[0]-x)**2 + (frameCenter[1]-y)**2))\n return diameter", "def circle_center(self):\n return self.container.width / 2, 
self.container.height / 2", "def GetCircle(circle):\r\n pass", "def circle_area(radius):\n return math.pi * radius ** 2", "def draw_circles_hough(image, circles):\n try:\n # Convert the circle parameters a, b and r to integers. \n detected_circles = np.uint16(np.around(circles)) \n \n for pt in detected_circles[0, :]: \n a, b, r = pt[0], pt[1], pt[2] \n \n # Draw the circumference of the circle. \n image = cv2.circle(image, (a, b), r, (0, 255, 0), 2) \n \n # Draw a small circle (of radius 1) to show the center. \n image_final = cv2.circle(image, (a, b), 1, (0, 0, 255), 3) \n \n return image_final\n except:\n print('[ERROR]: could not draw image')\n return None", "def get_center(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_center()", "def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)", "def get_roi_circle(self):\n return self.circle_list", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def get_uvcircle(Grid):\n \n# center of circulation\n loc=-67.5;lac=41.5; \n dx=(Grid['lonc']-loc)*Grid['coslatc']\n dy=(Grid['latc']-lac)\n di=np.sqrt(dx*dx+dy*dy)\n an=np.angle(dx+1j*dy)\n# velocity is linearly increasing with distance \n# 0.1 m/s at 1 deg distance away from center \n# cyclonic gyre \n u=-0.1*di*np.sin(an)\n v= 0.1*di*np.cos(an)\n# adjust the velocity so that the rotation will be perfect \n# on lon-lat plane\n u=u*Grid['coslatc']/np.cos(lac*np.pi/180) \n \n return u,v", "def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius", "def getAverageColorInCircle(img, cx, cy, radius):\n maxy, maxx,channels = img.shape\n\n mask = np.zeros((maxy, maxx), np.uint8)\n cv2.circle(mask, (cx, cy), radius, 255, -1)\n\n C = cv2.mean(img, mask)\n C = colorsys.rgb_to_hsv(C[2], C[1], C[0])\n return C", "def drawHoughCircles(img, minDist=20, param1=50, param2=30, minRadius=0, maxRadius=0 ,colorCircle=(0,255,0), colorCenter=(0,0,255), centerRadius=2 , thickness=2):\n\ttmp = grayscale(img)\n\ttmp = cv2.medianBlur(tmp, 5)\n\tcimg = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)\n\tcircles = cv2.HoughCircles(tmp, cv2.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2, minRadius=minRadius, maxRadius=maxRadius)\n\tif circles is None:\n\t\tprint \"No circles found, please adjust params...\\n\"\n\t\treturn None\n\tcircles = np.uint16(np.around(circles))\n\tfor i in circles[0,:]:\n\t\tcv2.circle(cimg, (i[0],i[1]),i[2], colorCircle, thickness)\n\t\tcv2.circle(cimg, (i[0],i[1]), centerRadius, colorCenter, thickness)\n\treturn cimg", "def circle(x, r, a, b, x_lim):\n y = (b + np.sqrt(maximum(\n r ** 2 - ((x - a) ** 2) * (x >= x_lim[0]) * (x <= x_lim[1]))\n )) * (x >= x_lim[0]) * (x <= x_lim[1])\n return y", "def mid_point(img, box):\n\n x1, y1, w, h = box[0], box[1], box[2], box[3]\n x2, y2 = x1+w, y1+h\n \n x_mid = int((x1+x2)/2)\n y_mid = int(y2)\n mid = (x_mid,y_mid)\n \n _ = cv2.circle(img, mid, 5, (255, 0, 0), -1)\n \n return mid", "def circle_area(r):\n if r < 0:\n raise ValueError(\"Radius cannot be negative\")\n\n return pi*(r**2)", "def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid", "def circle_of_least_confusion(self):\n ff=beam_field() \n 
ff.rectangular_grid(1,2000,self.entrance_pupil)\n ff.propagate(self.surfaces)\n def f(x):\n pl=ff.project_onto_plane(x)\n return max(pl[:,1])\n \n # m=self.marginal_ray\n if hasattr(self, 'start'):\n start=self.start\n else:\n# start=(m.Q_p[-1,0,2]-m.Q_p[-2,0,2])/2\n start=(self.surfaces[-1].pos()-self.surfaces[-2].pos())/2\n #print(start)\n res=minimize(f,(start), method='Nelder-Mead')\n self.start=res.final_simplex[0][0,0]\n \n return res.final_simplex[0][0,0],res.final_simplex[1][0]", "def outer_radius(self):\n return self._outer_radius", "def drawCircle(img, center, radius = 3, color = (0,0,255), fill = -1):\n\tcv2.circle(img, center, radius, color, fill)", "def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius", "def detect_large_circle(self, img):\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1.2, 100, 50, 150,\n 100)\n circle_mask = np.zeros(img.shape[:2], dtype='uint8')\n\n large_circle = None\n if circles is not None:\n # convert the (x, y) coordinates and radius of the circles to integers\n circles = np.round(circles[0, :]).astype(\"int\")\n print(circles)\n # loop over the (x, y) coordinates and radius of the circles\n try:\n for (x, y, r) in circles:\n large_circle = (x, y, r)\n # draw the circle in the output image, then draw a rectangle\n # corresponding to the center of the circle\n cv2.circle(img, (x, y), r, (0, 255, 0), 4)\n cv2.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\n except TypeError:\n return (0, 0, 1), circle_mask\n cv2.circle(circle_mask, tuple(large_circle[:2]), large_circle[-1], 255, -1)\n return large_circle, circle_mask", "def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. 
* ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius", "def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d", "def normalize_image(img, circle_center, circle_radius, target_circle_radius):\n # Center the image to the circle center.\n num_rows, num_cols, _ = img.shape\n img_center_x = num_cols / 2\n img_center_y = num_rows / 2\n scale = target_circle_radius / circle_radius\n\n # Create translation and scaling matrices.\n origin_to_circle_center_translation = np.float32(\n [[1, 0, -1 * circle_center[0]],\n [0, 1, -1 * circle_center[1]],\n [0, 0, 1]])\n circle_centered_scaling = np.float32(\n [[scale, 0, 0],\n [0, scale, 0],\n [0, 0, 1]])\n circle_center_to_new_origin_translation = np.float32(\n [[1, 0, img_center_x],\n [0, 1, img_center_y],\n [0, 0, 1]])\n transformation_matrix = np.matmul(\n circle_center_to_new_origin_translation,\n np.matmul(circle_centered_scaling, origin_to_circle_center_translation))\n\n # Apply the transformation.\n # Note the x-y order of image size.\n return cv.warpAffine(img, transformation_matrix[:2], (num_cols, num_rows))", "def cutout(self, centre, radius):", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def calculate_curvature_radius(self):\n\n # meters per pixel in y dimension\n ym_per_pix = config[\"video\"][\"y_meters_per_pixel\"]\n frame_height = config[\"video\"][\"size\"][1]\n\n # y_eval is where we want to evaluate the fits for the line radius calcuation\n # for us it's at the bottom of the image for us, and because we know\n # the size of our video/images we can just hardcode it\n y_eval = frame_height * ym_per_pix\n fit = self.line_fit_m\n\n # https://stackoverflow.com/a/40021903\n if fit.size != 0:\n curve_rad = ((1 + (2 * fit[0] * y_eval + fit[1]) ** 2) ** 1.5) / np.absolute(2 * fit[0])\n else:\n curve_rad = None\n self.curvature_radius = curve_rad", "def trackCircle( center, rad, imShape ):\n \n \"\"\"\n center = ccnt\n rad = rd\n inShape = segImg.shape\n debug = False\n \"\"\"\n \n # check if whole circle is inside image\n if (center[0] - rad) < 0 or (center[0] + rad) >= imShape[1] or (center[1] - rad) < 0 or (center[1] + rad) >= imShape[0]:\n raise NameError( 'Circle partialy outside the image' )\n \n center = np.array( center )\n \n # start tracking at right side of circle, always pick neigbouring pixel which is closest to tabs radius and stop when came around\n startPoint1 = np.round( center + np.array( [ rad, 0] ) )\n \n currentPoint = startPoint1.copy()\n contour = [ currentPoint ]\n iterNum = 0\n maxIterNum = 1000\n \n def getNextPoint():\n \"\"\"\n gets next point \n \"\"\"\n surroundingPts_local = np.array( [ [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1], [0,1], [1,1] ])\n surroundingPts_global = np.tile( currentPoint, [8,1] ) + surroundingPts_local\n \n if len( contour ) > 1:\n # dont use last\n includeInd = np.sum( surroundingPts_global == contour[-2], 1 ) != 2\n # aditionaly exlude neighbout pts\n excludeInd = np.where( includeInd == False)[0][0]\n if excludeInd == 0:\n includeInd[ 
[1, 7] ] = False\n elif excludeInd == 7:\n includeInd[ [0, 6] ] = False\n else:\n includeInd[ [ excludeInd-1, excludeInd+1 ] ] = False\n \n surroundingPts_global = surroundingPts_global * np.tile( includeInd, [2,1] ).T\n \n # find closest to demamnded radius\n dists = np.abs( np.sqrt( np.sum( ( surroundingPts_global - np.tile( center, [8,1] ) )**2, 1 ) ) - rad )\n ind = np.argmin( dists )\n return surroundingPts_global[ ind, : ]\n \n while 1:\n # check if max num of iterations passed\n if iterNum == maxIterNum:\n print Warning( 'Reached max num of iterations. Tracking unsuccessful!' )\n #return np.array( contour ).astype(np.int), -1\n break\n \n # get next point\n nextPoint = getNextPoint()\n\n # in first iteraton also remember sesond tracked point.\n if iterNum is 0: \n startPoint2 = nextPoint.copy()\n \n # check if came around\n if iterNum > 2 and ( np.sum(nextPoint == startPoint1) ==2 or np.sum(nextPoint == startPoint2) == 2 ):\n # finished successfuly\n break \n # print iterNum, nextPoint - startPoint1, nextPoint\n \n # add to storage\n contour.append( nextPoint ) \n # increment \n iterNum += 1\n # reassign\n currentPoint = nextPoint.copy()\n\n # return result and successful flag\n return np.array( contour ).astype(np.int)", "def circle_area(radius):\n area = radius ** 2 * math.pi\n return area", "def ridgecircle(self, x, expo=0.5):\r\n a = len(x)\r\n s = sum(x**2)\r\n return ((s - a)**2)**(expo/2) + s/a + sum(x)/a", "def area_circle(r):\n return (r ** 2) * math.pi", "def circumference(self):\n raise NotImplementedError", "def strip_outside_circle(input_array, center, radius):\n\n cx, cy = center\n r = radius\n xdim, ydim = input_array.shape\n\n y, x = np.ogrid[-cx:xdim-cx,-cy:ydim-cy]\n # Small adjustment for aliasing\n r = r - 2\n mask = x*x + y*y >= r*r\n\n output_array = np.copy(input_array)\n output_array[mask] = 0\n\n return output_array", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def circular_levelset(shape, center, sqradius, scalerow=1.0):\n grid = np.mgrid[list(map(slice, shape))].T - center\n phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))\n u = np.float_(phi > 0)\n return u", "def min_curvature(self, uv):\n min_curv = GeomLProp_SLProps(\n self.surface(), uv[0], uv[1], 2, 1e-9\n ).MinCurvature()\n if self.reversed():\n min_curv *= -1\n return min_curv", "def curvature(self):\n return self.circle().curvature(self.o, self.r, p = self.a)", "def circleMask(img, cir_x, cir_y, r, mode, filter=0):\n\n if not mode == 'interior' and not mode == 'exterior':\n print(mode, \"is not a supported mode. 
Please enter interior or exterior\")\n return 1\n\n #get the dimensions of the image\n n,m = img.shape\n\n #create an open grid for our image\n y,x = np.ogrid[0:n, 0:m]\n #operate on a copy of the image\n copyImg = img.copy()\n\n #get the x and y center points of our image\n center_x = cir_x\n center_y = cir_y\n\n #create a circle mask\n if mode == 'interior':\n circle_mask = (x-center_x)**2 + (y-center_y)**2 <= r**2\n elif mode == 'exterior':\n circle_mask = (x-center_x)**2 + (y-center_y)**2 >= r**2\n\n #black out anywhere within the circle mask\n copyImg[circle_mask] = [filter]\n copyImg[copyImg != filter] = [255-filter]\n\n return copyImg", "def intersection_area(self, d, R, r):\n \n if d <= abs(R-r):\n # One circle is entirely enclosed in the other.\n return np.pi * min(R, r)**2\n if d >= r + R:\n # The circles don't overlap at all.\n return 0\n \n r2, R2, d2 = r**2, R**2, d**2\n alpha = np.arccos((d2 + r2 - R2) / (2*d*r))\n beta = np.arccos((d2 + R2 - r2) / (2*d*R))\n answer = (r2 * alpha + R2 * beta -\n 0.5 * (r2 * np.sin(2*alpha) + R2 * np.sin(2*beta)))\n return answer", "def _is_circle_contractive(self,r,tol):\n B=np.diag(self.b)\n M=np.dot(B,self.A)+np.dot(self.A.T,B)-np.outer(self.b,self.b)\n X=M+B/r\n v,d=np.linalg.eig(X)\n if v.min()>-tol:\n return 1\n else:\n return 0", "def circum(radius, places):\n return 2 * pi * radius", "def circle_of_least_confusion(self,start):\n def f(x):\n pl=self.project_onto_plane(x)\n return max(pl[:,1])-min(pl[:,1])\n\n # m=self.marginal_ray\n if hasattr(self, 'start'):\n start=self.start\n else:\n# start=(m.Q_p[-1,0,2]-m.Q_p[-2,0,2])/2\n start=start\n print(start)\n res=minimize(f,(start), method='Nelder-Mead')\n self.start=res.final_simplex[0][0,0]\n\n return res.final_simplex[0][0,0],res.final_simplex[1][0]", "def circle(self):\n return circle(self.N, self.o, self.r)", "def solve_circle(pts, known_radius, guess, max_error_accepted):\n\n if len(pts) < 2:\n return None\n\n x, y = pts.T\n def error(c):\n xc, yc = c\n return np.sqrt((x-xc)**2 + (y-yc)**2)-known_radius\n\n center, ier = optimize.leastsq(error, guess, maxfev=50) # maxfev limits the number of iterations\n if ier in [1,2,3,4]:\n errs = error(center)\n max_err = np.amax(np.abs(errs))\n if max_err > max_error_accepted:\n return None\n return center\n return None", "def semicircle_intersection(coeffs):\n a, b = coeffs\n disc = 1 - 4 * b * (a + b)\n if disc < 0:\n raise ValueError(\n \"Discriminant < 0, there is no intersection with the semicircle.\"\n )\n else:\n disc = np.sqrt(disc)\n x = (1 - 2 * a * b + np.array([1, -1]) * disc) / (2 * (a ** 2 + 1))\n return x + 1j * np.sqrt(x - x ** 2)", "def GetCircleMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertCurve3dToBezier_GetCircleMode(self, *args)", "def createCircularMask(shape, radius=4, center=None):\n w = shape[0]\n h = shape[1]\n if center is None: \n center = [int(w/2), int(h/2)]\n if radius is None:\n radius = min(center[0], center[1], w-center[0], h-center[1])\n X, Y = np.ogrid[:w, :h]\n dist2 = (X - center[0])**2 + (Y-center[1])**2\n mask = dist2 <= radius**2\n return mask", "def detect_circles(image): \n try:\n if len(image.shape) > 2:\n print('[ERROR]: Dimension > 2. 
Is an image gray?')\n return None\n \n circles = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT,1, \n image.shape[0]/8, param1=100,\n param2=50,minRadius=0,maxRadius=0) \n if len(circles) == 0:\n print('[ERROR]: not possible to detect circles')\n return None \n else:\n return circles\n except:\n print('[ERROR]: could not detect circles')\n return None", "def find_black_center(cv_img, msk):\n\n # Convert to black and white\n (rows, cols, _) = cv_img.shape\n grey_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)\n grey_img = cv2.bilateralFilter(grey_img, 11, 17, 17)\n _, outlines = cv2.threshold(\n grey_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n # Subtract gripper\n msk_out = cv2.subtract(cv2.bitwise_not(outlines), msk)\n\n # Remove objects touching edges\n flood_fill_edges(msk_out, 30)\n\n # Find contours\n _, contours, _ = cv2.findContours(\n msk_out, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n if len(contours) == 0:\n return [(-1, -1), False]\n\n # Find largest contour\n max_area = 0\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > max_area:\n contour = cnt\n max_area = area\n\n # Approximate contour\n epsilon = 0.025 * cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, epsilon, True)\n\n # Find centroid\n try:\n M = cv2.moments(approx)\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n return [(cx, cy), approx]\n except ZeroDivisionError:\n return [(-1, -1), False]", "def circle_center(top_aerofoil_points, bottom_aerofoil_points):\n q = np.array(top_aerofoil_points[0].coordinates) - np.array(top_aerofoil_points[1].coordinates)\n r = np.array(bottom_aerofoil_points[-1].coordinates) - np.array(bottom_aerofoil_points[-2].coordinates)\n c = np.cross(q, [0, 0, -1]) / np.linalg.norm(q)\n d = np.cross(r, [0, 0, 1]) / np.linalg.norm(r)\n radius = (q[1] - r[1]) / (d[1] - c[1])\n s = q + radius * c\n return Point(tuple(-s))", "def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area", "def circumference(self, lat):\n return 2 * np.pi * self.rsphere * np.cos(np.deg2rad(lat))", "def radius(self):\n return sqrt(self.radius_square())", "def detect_and_draw_contours(frame, thresh, meas_last, meas_now, min_area = 0, max_area = 10000, ellipses = False, directors = False):\n # Detect contours and draw them based on specified area thresholds\n img, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n final = frame.copy()\n\n i = 0\n meas_last = meas_now.copy()\n del meas_now[:]\n director = 0. 
\n rx = ry = 0.\n cx = cy = 0.\n\n fname_min_enc_C = \"min_enc_C.dat\"\n f_min_enc_C = open(fname_min_enc_C,'a+')\n R_min_enc_C = x_min_enc_C = y_min_enc_C = 0.\n \n while i < len(contours):\n area = cv2.contourArea(contours[i])\n if area < min_area or area > max_area:\n del contours[i]\n else:\n\n cv2.drawContours(final, contours, i, (0,0,255), 1)\n # add ellipse here\n if ( ellipses ):\n ellipse = cv2.fitEllipse(contours[i])\n cv2.ellipse(final,ellipse,(0,255,0),2)\n M = cv2.moments(contours[i])\n\n # here is the ouput showing minEnclosingCircle, which should\n # basically give a long-axis measurement of any given ellipse\n (x_min_enc_C, y_min_enc_C), R_min_enc_C = cv2.minEnclosingCircle(contours[i]) \n f_min_enc_C.write(\"%e %e %e\\n\" %(x_min_enc_C,y_min_enc_C,R_min_enc_C))\n\n if M['m00'] != 0:\n cx = M['m10']/M['m00']\n cy = M['m01']/M['m00']\n if ( directors ):\n mu20 = M['m20']/M['m00'] - pow(cx,2)\n mu02 = M['m02']/M['m00'] - pow(cy,2)\n mu11 = M['m11']/M['m00'] - cx*cy\n else:\n \tcx = 0\n \tcy = 0\n\n if ( directors ):\n ry = 2*mu11\n rx = mu20-mu02\n if rx == 0:\n atan = 0.5*np.pi\n if ry < 0: atan *= -1 \n director = np.fmod(0.5*atan,2*np.pi) + np.pi\n else:\n director = np.fmod(0.5*np.arctan(ry/rx),2*np.pi) + np.pi\n if (rx < 0):\n director += np.pi/2.\n\n vsize = 10\n cv2.line(final,\n (int(cx - vsize*np.cos(director)), int(cy - vsize*np.sin(director))),\n (int(cx + vsize*np.cos(director)), int(cy + vsize*np.sin(director))), \n (255,0,0),2)\n meas_now.append([cx,cy,director])\n else: \n meas_now.append([cx,cy])\n\n i += 1\n\n f_min_enc_C.close()\n\n fname_ndist = \"ndist.dat\"\n f_ndist = open(fname_ndist,'a+')\n meas_now = np.array(meas_now)\n for i in range(len(meas_now)):\n for j in range(i+1,len(meas_now)):\n f_ndist.write(\"%e \\n\" % distance(meas_now[i,:-1],meas_now[j,:-1]))\n f_ndist.close()\n meas_now = list(meas_now)\n \n return final, contours, meas_last, meas_now", "def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)", "def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)" ]
[ "0.74524856", "0.72214043", "0.6968209", "0.655094", "0.637335", "0.62411404", "0.61780167", "0.61020416", "0.6038088", "0.6018948", "0.6009186", "0.59972024", "0.5996103", "0.5971567", "0.5965567", "0.5927634", "0.5920173", "0.5918442", "0.59156424", "0.5889828", "0.58796084", "0.5877014", "0.5871086", "0.586304", "0.5827431", "0.58237016", "0.5807667", "0.5807441", "0.5785175", "0.5781718", "0.5777818", "0.5760991", "0.5753921", "0.57450664", "0.5741361", "0.5710303", "0.56814814", "0.56623733", "0.5661651", "0.5660854", "0.5659585", "0.5653853", "0.5645558", "0.5644711", "0.56242377", "0.5609698", "0.5589096", "0.5584767", "0.5576434", "0.55718344", "0.55603206", "0.5552903", "0.55524874", "0.55423176", "0.5506143", "0.5504095", "0.550244", "0.55020726", "0.5482961", "0.5481979", "0.54666877", "0.54608214", "0.5455663", "0.54531175", "0.54464984", "0.5443415", "0.54310685", "0.5429936", "0.54229313", "0.5418076", "0.5415066", "0.5409706", "0.5405059", "0.5404213", "0.53926283", "0.5391717", "0.5378174", "0.53728724", "0.5371103", "0.5371084", "0.5356856", "0.5355825", "0.534326", "0.53427947", "0.53383183", "0.53255653", "0.5318563", "0.5314921", "0.53060657", "0.5305946", "0.5304386", "0.5302471", "0.53024256", "0.5298796", "0.529361", "0.5293602", "0.5290914", "0.5289436", "0.528284", "0.5281554" ]
0.80844194
0
Calculate the ellipse circularity.
def __CalculateCircularity(self, contour): if len(contour) < 2: return 0 perimeter = cv2.arcLength(contour, False) area = self.__CalculateArea(contour) return (4 * math.pi * area) / (perimeter * perimeter)
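A minimal standalone sketch of the circularity measure above (4*pi*area / perimeter^2), for context; it assumes only OpenCV, swaps the class's __CalculateArea helper for cv2.contourArea, and feeds in a synthetic circular contour built with cv2.ellipse2Poly — a near-perfect circle should score close to 1.0:

import math
import cv2

# Synthetic closed contour: a circle of radius 50 centred at (100, 100),
# sampled every 5 degrees (cv2.ellipse2Poly returns an Nx2 int32 array).
contour = cv2.ellipse2Poly((100, 100), (50, 50), 0, 0, 360, 5)

perimeter = cv2.arcLength(contour, True)   # closed=True here; the method above passes False
area = cv2.contourArea(contour)
circularity = (4 * math.pi * area) / (perimeter * perimeter)
print(round(circularity, 3))  # ~1.0 for a circle; elongated or ragged shapes score lower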
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def ellipse(self):\n f = self.img\n x = self.x\n y = self.y\n x2 = self.x2\n y2 = self.y2\n xy = self.xy\n self.a2 = (x2+y2) + sqrt(((x2-y2)/2.)**2 + xy**2)\n self.b2 = (x2+y2) - sqrt(((x2-y2)/2.)**2 + xy**2)\n self.a = sqrt(self.a2)\n self.b = sqrt(self.b2)\n tan2theta = 2* (xy/(x2-y2))\n self.theta = arctan(tan2theta)/2.\n denominator = sqrt(((x2-y2)/2)**2+xy**2)\n self.cxx = y2/denominator\n self.cyy = x2/denominator\n self.cxy = -2*xy/denominator", "def r_ellipse(self,xc=None,yc=None):\n x = self.x\n y = self.y\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.rel = sqrt(self.cxx*(x-xc)**2 +\n\t\t self.cyy*(y-yc)**2 +\n\t\t self.cxy*(x-xc)*(y-yc)\n\t\t )", "def ellipse_ellipticity(S):\n return 1/2 * np.arcsin(S[..., 3]/S[..., 0])", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)", "def ellipse_radii_test(radii, eccentricity = 0, perimeter = 2*np.pi*1):\n a,b = radii\n return (np.sqrt(np.absolute(1 - (b**2)/(a**2))) - eccentricity,\n # perimeter approximation from https://www.mathsisfun.com/geometry/ellipse-perimeter.html\n np.pi * (3 * (a + b) - np.sqrt(np.absolute((3 * a + b) * (a + 3 * b)))) - perimeter)", "def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius", "def calcualte_ellipse_radii(guess, eccentricity = 0, perimeter = 2 * np.pi*1):\n return fsolve(ellipse_radii_test, guess, args = (eccentricity, perimeter))", "def create_ellipse(self, ratio):\n circ = Point(self.center).buffer(1.0)\n ell = affinity.scale(circ, float(\n self.lengths[0]*ratio), float(self.lengths[1]*ratio))\n ellr = affinity.rotate(ell, self.angle)\n return ellr", "def area_ellipse(radius_x: float, radius_y: float) -> float:\r\n if radius_x < 0 or radius_y < 0:\r\n raise ValueError(\"area_ellipse() only accepts non-negative values\")\r\n return pi * radius_x * radius_y", "def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def getCircleCircumscribed(self):\n p1, p2, p3 = self.points\n a1 = - (p2.x - p1.x) / (p2.y - p1.y)\n b1 = (p2.x ** 2 - p1.x ** 2 + p2.y ** 2 - p1.y ** 2) / (2 * (p2.y - p1.y))\n a2 = - (p3.x - p2.x) / (p3.y - p2.y)\n b2 = (p3.x ** 2 - p2.x ** 2 + p3.y ** 2 - p2.y ** 2) / (2 * (p3.y - p2.y))\n x = (b1 - b2) / (a2 - a1)\n y = a1 * x + b1\n radius = 
math.hypot(p1.x - x, p1.y - y)\n return Circle(x, y, radius=radius)", "def getEllipse(self, xc, Sigma, nSigma=2):\n\n if nla.det(Sigma) == 0:\n return None\n\n w, v = nla.eig(Sigma)\n D = np.diag(w, 0)\n\n theta = np.linspace(0, 2*np.pi, 100, endpoint=True)\n circle = nSigma*np.vstack((np.cos(theta), np.sin(theta)))\n\n el = sla.sqrtm(D)\n el = el.dot(circle)\n el = v.dot(el)\n\n XY = xc + el\n\n return XY", "def orbital_eccentricity(self):\n return self._orbital_eccentricity", "def test_calc_circle(self):\n t = AioBaseTurtle()\n steps, step_len, rot_step = t._calc_circle(100, extent=180)\n self.assertEqual(steps, 14)\n self.assertAlmostEqual(rot_step, 180.0 / 14.0)\n self.assertAlmostEqual(step_len, 22.3928952207)", "def area_circle(r):\n return (r ** 2) * math.pi", "def circle_circumference(a):\n return (2*a*math.pi)", "def circleArea(radius):\n return math.pi * radius * radius", "def _getEllipseSize(self, pointInEllipse):\n x = abs(self.center[0] - pointInEllipse[0])\n y = abs(self.center[1] - pointInEllipse[1])\n if x == 0 or y == 0:\n return x, y\n # Ellipse definitions\n # e: eccentricity\n # a: length fron center to bounding box width\n # b: length fron center to bounding box height\n # Equations\n # (1) b < a\n # (2) For x,y a point in the ellipse: x^2/a^2 + y^2/b^2 = 1\n # (3) b = a * sqrt(1-e^2)\n # (4) e = sqrt(a^2 - b^2) / a\n\n # The eccentricity of the ellipse defined by a,b=x,y is the same\n # as the one we are searching for.\n swap = x < y\n if swap:\n x, y = y, x\n e = math.sqrt(x**2 - y**2) / x\n # From (2) using (3) to replace b\n # a^2 = x^2 + y^2 / (1-e^2)\n a = math.sqrt(x**2 + y**2 / (1.0 - e**2))\n b = a * math.sqrt(1 - e**2)\n if swap:\n a, b = b, a\n return a, b", "def ellipse(x,y,a,b):\n return ((x/float(a))**2 + (y/float(b))**2)", "def drawEllipse(img, center, axes, angle, startAngle=0, endAngle=360, color = (0,0,255), fill = -1):\n\tcv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, fill)", "def circumference(self):\n raise NotImplementedError", "def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius", "def __CalculatePerimeter(self, curve):\r\n return cv2.arcLength(curve, True)", "def make_circle(self):\n A = 2*np.random.rand(self.m, self.n)-1\n b = np.sign(np.sum(A**2, 1) - self.radius)\n return A, b", "def ridgecircle(self, x, expo=0.5):\r\n a = len(x)\r\n s = sum(x**2)\r\n return ((s - a)**2)**(expo/2) + s/a + sum(x)/a", "def area_of_circle(radius):\n return radius", "def ellipse(self, x, y, radiusx, radiusy, rotation=0, startangle=0, endangle=2 * pi, anticlockwise=False):\n self._impl.ellipse(x, y, radiusx, radiusy, rotation, startangle, endangle, anticlockwise)", "def circle(\n network,\n pore_diameter='pore.diameter',\n):\n return _pi/4 * network[pore_diameter]**2", "def circumference(self):\n return math.pi * self.radius * 2", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def calculate_eccentricity_ratio(self):\n if not self.bearing_type == \"short_bearing\":\n warnings.warn(\n \"Function calculate_eccentricity_ratio suitable only for short bearings. \"\n \"The ratio between the bearing length and its radius should be less or \"\n \"equal to 0.25. 
Currently we have \"\n + str(self.length / self.radius_stator)\n + \".\"\n )\n s = self.modified_sommerfeld_number()\n coefficients = [\n 1,\n -4,\n (6 - (s ** 2) * (16 - np.pi ** 2)),\n -(4 + (np.pi ** 2) * (s ** 2)),\n 1,\n ]\n roots = np.roots(coefficients)\n for i in range(0, len(roots)):\n if 0 <= roots[i] <= 1:\n return np.sqrt(roots[i].real)\n sys.exit(\"Eccentricity ratio could not be calculated.\")", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def GetCircle(circle):\r\n pass", "def circle(self, clear_screen=True, x=50, y=50, radius=40, fill_color='black', outline_color='black'):\n\n if clear_screen:\n self.clear()\n\n x1 = x - radius\n y1 = y - radius\n x2 = x + radius\n y2 = y + radius\n\n return self.draw.ellipse((x1, y1, x2, y2), fill=fill_color, outline=outline_color)", "def AddEllipse(self,centerPnt,majorAxis,radiusRatio):\n\t\tellipse=self.Space.addEllipse(centerPnt,majorAxis,radiusRatio)\n\t\treturn ellipse", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def value_circle(self):\r\n return self.circle", "def area_circle(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"area_circle() only accepts non-negative values\")\r\n return pi * radius**2", "def extractEllipseCircum(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for circumference\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1 - aniso**2 )\n\n # Circumference function\n f = lambda theta, e: np.sqrt( 1 - e**2 * np.sin(theta)**2 )\n\n # Using quadrature integration\n F, err = quad(f,0,np.pi/2.,args=(e,))\n\n if err > 1e-5:\n raise Exception(\"Circumference integration has failed.\")\n\n # Circumference = 4 * a * int_0^pi/2 f d\\theta\n circ = 4 * kperp * F\n\n return circ", "def circle_area(radius):\n return math.pi * radius ** 2", "def circle(self):\n return circle(self.N, self.o, self.r)", "def _is_circle_contractive(self,r,tol):\n B=np.diag(self.b)\n M=np.dot(B,self.A)+np.dot(self.A.T,B)-np.outer(self.b,self.b)\n X=M+B/r\n v,d=np.linalg.eig(X)\n if v.min()>-tol:\n return 1\n else:\n return 0", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def get_points_on_ellipse(a, b, numPoints, startAngle = 0, verbose = False, increment = 0.01):\n def distance(x1,y1,x2,y2):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)\n x0 = a\n y0 = 0\n angle = 0\n d = 0\n while(angle <= 360):\n x = a * np.cos(np.radians(angle))\n y = b * np.sin(np.radians(angle))\n d += distance(x0,y0,x,y)\n x0 = x\n y0 = y\n angle += increment\n if verbose:\n print(\"The estimated circumference of ellipse is {:f}\".format(d))\n points = []\n arcLength = d/numPoints\n angle = 0\n x0 = a\n y0 = 0\n angle0 = 0\n while(angle0 < startAngle):\n angle += increment\n x = a * np.cos(np.radians(angle))\n y = b * np.sin(np.radians(angle))\n x0 = x\n y0 = y\n angle0 = angle\n for i in range(numPoints):\n dist = 0\n while(dist < arcLength):\n angle += increment\n x = a * np.cos(np.radians(angle))\n y = b * np.sin(np.radians(angle))\n dist += distance(x0,y0,x,y)\n x0 = x\n y0 = y\n if verbose:\n print(\n \"{} : angle = {:.2f}\\tdifference = {:.2f}\\tDistance {:.2f}\"\n .format(i+1,angle, angle-angle0,dist))\n points.append([x0, y0])\n angle0 = angle\n return np.array(points)", "def collide(self):\n dist = distance.cdist(self.object_position, self.object_position, \"euclidean\")\n collision = ((dist - self.object_radius) <= 0) * 1\n 
np.fill_diagonal(collision, 0)\n collision = np.sum(collision, axis=1)\n print(dist)\n print(collision)\n return collision", "def fit_ellipse(*args, equatorial_radius, dequatorial_radius=0, center_f=0, dcenter_f=0, center_g=0,\n dcenter_g=0, oblateness=0, doblateness=0, position_angle=0, dposition_angle=0,\n loop=10000000, number_chi=10000, dchi_min=None, verbose=False, ellipse_error=0, sigma_result=1):\n from sora.extra import ChiSquare\n from sora.config.visuals import progressbar_show\n from astropy.coordinates import Angle\n from .core import Occultation\n\n v = {'dcenter_f': dcenter_f, 'dcenter_g': dcenter_g, 'doblateness': doblateness, 'dposition_angle': dposition_angle,\n 'dequatorial_radius': dequatorial_radius, 'ellipse_error': ellipse_error, 'sigma_result': sigma_result,\n 'dchi_min': dchi_min}\n for key, item in v.items():\n if item is not None and item < 0:\n raise ValueError(\"{} must be a positive number.\".format(key))\n\n values = []\n chord_name = []\n if len(args) == 0:\n raise ValueError('No occultation have been given as input.')\n for occ in args:\n if not isinstance(occ, Occultation):\n raise TypeError('Given argument must be an Occultation object.')\n for name, chord in occ.chords.items():\n if chord.status() == 'positive':\n if chord.is_able['immersion']:\n f, g, vf, vg = chord.get_fg(time='immersion', vel=True)\n err = np.linalg.norm([vf, vg])*chord.lightcurve.immersion_err\n values.append([f, g, err])\n chord_name.append(name + '_immersion')\n if chord.is_able['emersion']:\n f, g, vf, vg = chord.get_fg(time='emersion', vel=True)\n err = np.linalg.norm([vf, vg])*chord.lightcurve.emersion_err\n values.append([f, g, err])\n chord_name.append(name + '_emersion')\n\n controle_f0 = Time.now()\n f0_chi = np.array([])\n g0_chi = np.array([])\n a_chi = np.array([])\n obla_chi = np.array([])\n posang_chi = np.array([])\n chi2_best = np.array([])\n\n while len(f0_chi) < number_chi:\n progressbar_show(len(f0_chi), number_chi, prefix='Ellipse fit:')\n chi2 = np.zeros(loop)\n f0 = center_f + dcenter_f*(2*np.random.random(loop) - 1)\n g0 = center_g + dcenter_g*(2*np.random.random(loop) - 1)\n a = equatorial_radius + dequatorial_radius*(2*np.random.random(loop) - 1)\n obla = oblateness + doblateness*(2*np.random.random(loop) - 1)\n obla[obla < 0], obla[obla > 1] = 0, 1\n phi_deg = position_angle + dposition_angle*(2*np.random.random(loop) - 1)\n controle_f1 = Time.now()\n\n for fi, gi, si in values:\n b = a - a*obla\n phi = phi_deg*(np.pi/180.0)\n dfi = fi-f0\n dgi = gi-g0\n theta = np.arctan2(dgi, dfi)\n ang = theta+phi\n r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)\n f_model = f0 + r_model*np.cos(theta)\n g_model = g0 + r_model*np.sin(theta)\n chi2 += ((fi - f_model)**2 + (gi - g_model)**2)/(si**2 + ellipse_error**2)\n\n controle_f2 = Time.now()\n if dchi_min is not None:\n region = np.where(chi2 < chi2.min() + dchi_min)[0]\n else:\n region = np.arange(len(chi2))\n chi2_best = np.append(chi2_best, chi2[region])\n if verbose:\n print('Elapsed time: {:.3f} seconds.'.format((controle_f2 - controle_f1).sec))\n print(len(chi2[region]), len(chi2_best))\n f0_chi = np.append(f0_chi, f0[region])\n g0_chi = np.append(g0_chi, g0[region])\n a_chi = np.append(a_chi, a[region])\n obla_chi = np.append(obla_chi, obla[region])\n posang_chi = np.append(posang_chi, phi_deg[region])\n\n progressbar_show(number_chi, number_chi, prefix='Ellipse fit:')\n chisquare = ChiSquare(chi2_best, len(values), center_f=f0_chi, center_g=g0_chi, equatorial_radius=a_chi,\n oblateness=obla_chi, 
position_angle=posang_chi)\n controle_f4 = Time.now()\n if verbose:\n print('Total elapsed time: {:.3f} seconds.'.format((controle_f4 - controle_f0).sec))\n\n result_sigma = chisquare.get_nsigma(sigma=sigma_result)\n a = result_sigma['equatorial_radius'][0]\n f0 = result_sigma['center_f'][0]\n g0 = result_sigma['center_g'][0]\n obla = result_sigma['oblateness'][0]\n phi_deg = result_sigma['position_angle'][0]\n radial_dispersion = np.array([])\n error_bar = np.array([])\n position_angle_point = np.array([])\n\n for fi, gi, si in values:\n b = a - a*obla\n phi = phi_deg*(np.pi/180.0)\n dfi = fi-f0\n dgi = gi-g0\n r = np.sqrt(dfi**2 + dgi**2)\n theta = np.arctan2(dgi, dfi)\n ang = theta+phi\n r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)\n radial_dispersion = np.append(radial_dispersion, r - r_model)\n error_bar = np.append(error_bar, si)\n position_angle_point = np.append(position_angle_point, Angle(90*u.deg - theta*u.rad).wrap_at(360 * u.deg).degree)\n\n for occ in args:\n if isinstance(occ, Occultation):\n occ.fitted_params = {i: result_sigma[i] for i in ['equatorial_radius', 'center_f', 'center_g',\n 'oblateness', 'position_angle']}\n occ.chi2_params = {'chord_name': chord_name, 'radial_dispersion': radial_dispersion,\n 'position_angle': position_angle_point, 'radial_error': error_bar,\n 'chi2_min': chisquare.get_nsigma(sigma=sigma_result)['chi2_min'],\n 'nparam': chisquare.nparam, 'npts': chisquare.npts}\n return chisquare", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def circle_area(r):\n if r < 0:\n raise ValueError(\"Radius cannot be negative\")\n\n return pi*(r**2)", "def ellipse_dist_ratio(self, theta, lwr):\n\n #clear form of the code\n \"\"\"\n def focal_distance(theta, lwr):\n a = lwr\n b = 1.0\n\n #eccentricity\n # e = math.sqrt( (a**2 - b**2)/a**2)\n e = math.sqrt( (a**2 - 1.0) / a**2)\n\n dist_on_angle = a * (1.0 - e**2) / (1.0 - e*math.cos(theta))\n\n #when theta = 0, the dist formula becomes:\n #dist_forward = a * (1.0 - e**2) / (1.0 - e*math.cos(0))\n #dist_forward = a * (1.0 - e**2) / (1.0 - e*1)\n dist_forward = a * (1.0 - e**2) / (1.0 - e)\n\n return dist_on_angle / dist_forward\n\n \"\"\"\n #TESTED 3-10-16: This function evaluates identically to the above\n e = math.sqrt( (lwr**2 - 1.0) / lwr**2)\n e2 = (1.0 - e**2)\n dist_on_angle = lwr * e2 / (1.0 - e*math.cos(theta))\n dist_forward = lwr * e2 / (1.0 - e)\n\n return dist_on_angle / dist_forward", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def elliptic_arc(numbers, p_current, relative = False):\n if len(numbers) != 7:\n return None\n\n if any(numbers[:2]) == 0:\n return straight_line(numbers, p_current, relative)\n else:\n p_next = Point(numbers[5], numbers[6])\n if relative:\n p_next += p_curent\n\n return Ellipse(p_current, p_next, \n numbers[0], #radius x\n numbers[1], #radius y\n numbers[2], #angle\n numbers[3], #large flag\n numbers[4]) #sweep flag", "def ellipse(self,image,major_axis,minor_axis,i,j,c_x,c_y):\r\n \r\n if (((i-c_x)/(minor_axis+self.padding))**2 + ((j-c_y)/(major_axis+self.padding))**2)<=1:\r\n \r\n image[self.maximum_size-i,j,:]=0,0,0\r\n self.image_p[i,j,:]=2", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n 
zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def circle_area(circle):\n return pi * circle.radius * circle.radius", "def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area", "def ci95_ellipse(data, type=\"pop\"):\n\n # Build and fit PCA model\n pca = PCA()\n pca.fit(data)\n coeff = pca.components_\n score = pca.transform(data)\n eigvals = pca.explained_variance_\n\n # Calculate rotation angle\n phi = math.atan2(coeff[0, 1], coeff[0, 0])\n\n # This angle is between -pi and pi.\n # Let's shift it such that the angle is between 0 and 2pi\n if phi < 0:\n phi += 2 * math.pi\n\n # Get the coordinates of the data mean\n n = len(data)\n m = np.mean(data, axis=0)\n x0 = m[0]\n y0 = m[1]\n\n # Get the 95% confidence interval error ellipse\n # inverse of the chi-square cumulative distribution for p = 0.05 & 2 d.f. = 5.9915\n chisquare_val = 5.9915\n if type is \"pop\":\n a = math.sqrt(chisquare_val * eigvals[0])\n b = math.sqrt(chisquare_val * eigvals[1])\n elif type is \"mean\":\n a = math.sqrt(chisquare_val * eigvals[0] / n)\n b = math.sqrt(chisquare_val * eigvals[1] / n)\n else:\n raise ValueError(\"type has to be 'pop' or 'mean'.\")\n\n # the ellipse in x and y coordinates\n theta_grid = np.linspace(0, 2 * math.pi, num=100)\n ellipse_x_r = a * np.cos(theta_grid)\n ellipse_y_r = b * np.sin(theta_grid)\n\n # Define a rotation matrix\n R = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])\n # let's rotate the ellipse to some angle phi\n r_ellipse = np.dot(np.vstack((ellipse_x_r, ellipse_y_r)).T, R)\n\n # Draw the error ellipse\n x = r_ellipse[:, 0] + x0\n y = r_ellipse[:, 1] + y0\n ellipse = np.stack((x, y), axis=1)\n\n outside = []\n for i in range(len(score)):\n metric = (score[i, 0] / a) ** 2 + (score[i, 1] / b) ** 2\n if metric > 1:\n outside.append(1)\n else:\n outside.append(0)\n\n return ellipse, outside", "def eccentricity(self):\n return sqrt(self.f * 2 - self.f ** 2)", "def fit_ellipse(x,y):\r\n \r\n def fit(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))\r\n n = np.argmax(np.abs(E))\r\n a = V[:,n]\r\n return a\r\n \r\n def ellipse_center(a):\r\n b,c,d,f,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[0]\r\n num = b*b-a*c\r\n x0=(c*d-b*f)/num\r\n y0=(a*f-b*d)/num\r\n return np.array([x0,y0])\r\n \r\n def ellipse_angle_of_rotation(a):\r\n b,c,a = a[1]/2, a[2], a[0]\r\n return 0.5*np.arctan(2*b/(a-c))\r\n \r\n def ellipse_axis_length(a):\r\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\r\n up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\r\n down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n res1=np.sqrt(up/down1)\r\n res2=np.sqrt(up/down2)\r\n return np.array([res1, res2])\r\n \r\n a = fit(x,y)\r\n center = ellipse_center(a)\r\n theta = ellipse_angle_of_rotation(a)\r\n [R1,R2] = ellipse_axis_length(a)\r\n\r\n return R1, R2, center, theta", "def getMinVolEllipse(P, tolerance=0.01):\n (N, d) = np.shape(P)\n d = float(d)\n\n # Q will be our working array\n Q = np.vstack([np.copy(P.T), np.ones(N)]) \n QT = Q.T\n \n # initializations\n err = 1.0 + tolerance\n u = (1.0 / N) * np.ones(N)\n\n # Khachiyan Algorithm\n while err > tolerance:\n V = np.dot(Q, np.dot(np.diag(u), QT))\n M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal 
vector of an NxN matrix\n j = np.argmax(M)\n maximum = M[j]\n step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))\n new_u = (1.0 - step_size) * u\n new_u[j] += step_size\n err = np.linalg.norm(new_u - u)\n u = new_u\n\n # center of the ellipse \n center = np.dot(P.T, u)\n\n # the A matrix for the ellipse\n A = linalg.inv(\n np.dot(P.T, np.dot(np.diag(u), P)) - \n np.array([[a * b for b in center] for a in center])\n ) / d\n \n # Get the values we'd like to return\n U, s, rotation = linalg.svd(A)\n radii = 1.0/np.sqrt(s)\n\n rot_err = linalg.norm(np.identity(3)-abs(rotation))\n if(rot_err > 0.05):\n \tradii = np.array([radii[1],radii[0],radii[2]])\n return radii", "def DrawEllipse(*args, **kwargs):\n return _gdi_.DC_DrawEllipse(*args, **kwargs)", "def circle_contractivity_radius(self,acc=1.e-13,rmax=1000):\n from nodepy.utils import bisect\n\n tol=1.e-14\n r=bisect(0,rmax,acc,tol,self.__num__()._is_circle_contractive)\n return r", "def radon_ellipses(N,theta_vec, E, tvec_set=None, circle=False):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*N/2\n E[:,1] = E[:,1]*N/2\n E[:,2] = E[:,2]*N/2\n E[:,3] = E[:,3]*N/2\n E[:,4] = E[:,4]*math.pi/180\n \n [t_vec, grid_t, grid_theta] = build_t_theta_pixel(N, theta_vec, tvec_set=tvec_set, circle =circle);\n\n (nrowE,ncolE) = E.shape;\n tmp = np.zeros((nrowE,len(grid_theta)))\n for i in range(nrowE):\n grid_theta_new = grid_theta - E[i,4]\n x_new = (E[i,2]*np.cos(grid_theta)+E[i,3]*np.sin(grid_theta))\n y_new = (-E[i,2]*np.sin(grid_theta)+E[i,3]*np.cos(grid_theta))\n grid_t_new = (grid_t -x_new)/E[i,1]\n\n v1 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2 - grid_t_new**2\n cond = v1;\n v2 = np.zeros((v1.shape[0],1))\n for j in range (len(grid_theta)):\n if cond[j] > 0:\n v2[j]=1\n else:\n v2[j]=0\n #endif\n #endfor\n v3 = np.sqrt(v1*v2);\n v4 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2\n tmp[i,:] = np.transpose( 2*E[i,0]*E[i,5]*(v3/v4) )\n #endfor\n radvec = np.sum(tmp,axis = 0);\n analytical_sinogram = np.transpose(np.reshape(radvec,(len(theta_vec),len(t_vec))))\n return analytical_sinogram", "def add_circle(self, r_center, c_center, radius, color=BLUE, image=np.full((640, 480, 3), BLACK)):\n circle = np.fromfunction(lambda r, c, _: (r - r_center) ** 2 + (c - c_center) ** 2 <= radius ** 2, image.shape)\n return np.where(circle, color, image)", "def _get_radial(self):\n return self.startRadius is not None and self.endRadius is not None", "def incircle(self, a, b, c):\n m11, m12 = a.x - self.x, a.y - self.y\n m13 = m11 * m11 + m12 * m12\n m21, m22 = b.x - self.x, b.y - self.y\n m23 = m21 * m21 + m22 * m22\n m31, m32 = c.x - self.x, c.y - self.y\n m33 = m31 * m31 + m32 * m32\n det1 = m11 * (m22 * m33 - m23 * m32)\n det2 = m12 * (m21 * m33 - m23 * m31)\n det3 = m13 * (m21 * m32 - m22 * m31)\n return near(det1 - det2 + det3, 0)", "def circle_area(pop):\n\treturn math.pi * pop / (200.0 ** 2)", "def circle(self, p, radius, **kwargs):\n cx, cy = self._sky2img(p)\n self._draw.ellipse([cx-radius, cy-radius, cx+radius, cy+radius], **kwargs)", "def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))", "def check_ellipse(self) -> list or None:\n x_text = self.ui.lineEditEX.text()\n xc = float(x_text) if checker.check_float(x_text) else None\n y_text = self.ui.lineEditEY.text()\n yc = float(y_text) if checker.check_float(y_text) else None\n a_text = self.ui.lineEditEA.text()\n a = float(a_text) if (checker.check_float(a_text) 
and\n float(a_text) > 0) else None\n b_text = self.ui.lineEditEB.text()\n b = float(b_text) if (checker.check_float(b_text) and\n float(b_text) > 0) else None\n\n if xc is None or yc is None or a is None or b is None:\n return None\n else:\n return [xc, yc, a, b]", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)", "def ellipse_area(semi_major_axis: number, semi_minor_axis : number) -> number:\n area = pi*semi_major_axis*semi_minor_axis\n return area", "def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)", "def filled_circle(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0\n\tdef func(i0, i1):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\trr2 = ii0**2 + ii1**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def circumference(self):\n return self.width + self.height", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def circle_area(radius):\n area = radius ** 2 * math.pi\n return area", "def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")", "def circle_center(self):\n return self.container.width / 2, self.container.height / 2", "def HollowCircle(self,center=(0,0),inner_radius=1.0,outer_radius=2.,element_type='tri',isotropic=True,nrad=5,ncirc=10):\n\n # FOR SAFETY, RESET THE CLASS\n self.__reset__()\n\n if np.allclose(inner_radius,0):\n raise ValueError('inner_radius cannot be zero')\n\n t = np.linspace(0,2*np.pi,ncirc+1)\n if isotropic is True:\n radii = np.linspace(inner_radius,outer_radius,nrad+1)\n else:\n base = 3\n radii = np.zeros(nrad+1,dtype=np.float64)\n mm = np.linspace(np.power(inner_radius,1./base),np.power(outer_radius,1./base),nrad+1)\n for i in range(0,nrad+1):\n radii[i] = mm[i]**base\n\n\n # base = 3\n # mm = np.linspace(np.power(inner_radius,1./base),np.power(2.,1./base),nrad+1)\n # mm = np.append(mm,np.linspace(2,outer_radius,nrad+1))\n # radii = np.zeros(mm.shape[0],dtype=np.float64)\n # for i in range(0,mm.shape[0]):\n # radii[i] = mm[i]**base\n\n\n # dd = np.logspace(inner_radius,outer_radius,nrad+1,base=2)/2**np.linspace(inner_radius,outer_radius,nrad+1)\n # print dd*np.linspace(inner_radius,outer_radius,nrad+1)\n # print np.logspace(0,1.5,nrad+1,base=2)\n\n\n xy = np.zeros((radii.shape[0]*t.shape[0],2),dtype=np.float64)\n for i in range(0,radii.shape[0]):\n 
xy[i*t.shape[0]:(i+1)*t.shape[0],0] = radii[i]*np.cos(t)\n xy[i*t.shape[0]:(i+1)*t.shape[0],1] = radii[i]*np.sin(t)\n\n\n # REMOVE DUPLICATES GENERATED BY SIN/COS OF LINSPACE\n xy = xy[np.setdiff1d( np.arange(xy.shape[0]) , np.linspace(t.shape[0]-1,xy.shape[0]-1,radii.shape[0]).astype(int) ),:]\n\n connec = np.zeros((1,4),dtype=np.int64)\n\n for j in range(1,radii.shape[0]):\n for i in range((j-1)*(t.shape[0]-1),j*(t.shape[0]-1)):\n if i<j*(t.shape[0]-1)-1:\n connec = np.concatenate((connec,np.array([[i,t.shape[0]-1+i,t.shape[0]+i,i+1 ]])),axis=0)\n # connec = connec + ((i,t.shape[0]-1+i,t.shape[0]+i,i+1),)\n else:\n connec = np.concatenate((connec,np.array([[i,t.shape[0]-1+i,j*(t.shape[0]-1),(j-1)*(t.shape[0]-1) ]])),axis=0)\n # connec = connec + ((i,t.shape[0]-1+i,j*(t.shape[0]-1),(j-1)*(t.shape[0]-1)),)\n\n connec = connec[1:,:]\n # connec = np.asarray(connec[1:])\n\n\n if element_type == 'tri':\n connec_tri = np.zeros((2*connec.shape[0],3),dtype=np.int64)\n for i in range(connec.shape[0]):\n connec_tri[2*i,:] = np.array([connec[i,0],connec[i,1],connec[i,3]])\n connec_tri[2*i+1,:] = np.array([connec[i,2],connec[i,3],connec[i,1]])\n\n self.elements = connec_tri\n self.nelem = self.elements.shape[0]\n self.element_type = element_type\n # OBTAIN MESH EDGES\n self.GetBoundaryEdgesTri()\n\n elif element_type == 'quad':\n self.elements = connec\n self.nelem = self.elements.shape[0]\n self.element_type = element_type\n self.GetBoundaryEdgesQuad()\n\n # ASSIGN NODAL COORDINATES\n self.points = xy\n # IF CENTER IS DIFFERENT FROM (0,0)\n self.points[:,0] += center[0]\n self.points[:,1] += center[1]\n # ASSIGN PROPERTIES\n self.nnode = self.points.shape[0]", "def inside_circle(total_count):\n\n x = np.float32(np.random.uniform(size=total_count))\n y = np.float32(np.random.uniform(size=total_count))\n\n radii = ##\n\n count = ##\n\n return count", "def to_ellipse(self, factor=1.0):\n self._check_initialized()\n vals, vecs = sp.linalg.eigh(self.covariance)\n order = vals.argsort()[::-1]\n vals, vecs = vals[order], vecs[:, order]\n angle = np.arctan2(*vecs[:, 0][::-1])\n width, height = factor * np.sqrt(vals)\n return angle, width, height", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def _arc_radius(height_in_units):\n return height_in_units * _ARC_HEIGHT_UNIT / (1 - math.cos(_ANGLE))", "def cone(individual, position, height, width):\n value = 0.0\n for x, p in zip(individual, position):\n value += (x - p)**2\n return height - width * math.sqrt(value)", "def ellipse_radius_at_angle(ellipses_np, query_angles):\n\n # ensure that all arrays have the right dimensions\n radii = np.reshape(ellipses_np[:, 2:4], (-1, 2)) # make sure it's an n-by-2 matrix\n ellipse_angle = np.reshape(ellipses_np[:, 4], (-1, 1)) # make sure it's a column vector\n query_angles = np.reshape(query_angles, (1, -1)) # make sure it's a row vector\n\n # calculate radii\n a = np.deg2rad(query_angles - ellipse_angle)\n prod = np.prod(radii, 1, keepdims=True)\n sqr_sum = np.sqrt((radii[:, None, 0] * np.sin(a)) ** 2 + (radii[:, None, 1] * np.cos(a)) ** 2) # None to keep dims\n\n return prod / sqr_sum", "def get_circ(event,x,y,flags,param):\t\n global frame,frameCenter#,diameter\n click = 0\n if event == cv2.EVENT_LBUTTONDOWN:\n #draw circles where user clicks\n cv2.circle(frame,(x,y),1,(0,0,0),-1)\n cv2.imshow('Click 6 points on circle', frame)\n diameter.append(np.sqrt((frameCenter[0]-x)**2 + (frameCenter[1]-y)**2))\n return diameter", "def DrawEllipse(*args, 
**kwargs):\n return _gdi_.PseudoDC_DrawEllipse(*args, **kwargs)", "def get_roi_circle_len(self):\n return len(self.circle_list)", "def ellipse(radii = (10,5), angle_resolution = 2.5, layer = 0):\n D = Device(name = 'ellipse')\n a = radii[0]\n b = radii[1]\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n r = a*b / (sqrt((b*cos(t))**2 + (a*sin(t))**2))\n xpts = r*cos(t)\n ypts = r*sin(t)\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def check_circle(self) -> list or None:\n x_text = self.ui.lineEditCX.text()\n xc = float(x_text) if checker.check_float(x_text) else None\n y_text = self.ui.lineEditCY.text()\n yc = float(y_text) if checker.check_float(y_text) else None\n radius_text = self.ui.lineEditRad.text()\n radius = (float(radius_text) if checker.check_float(radius_text) and\n float(radius_text) > 0 else None)\n if xc is None or yc is None or radius is None:\n return None\n else:\n return [xc, yc, radius]", "def ellipseArea(semiMajor, semiMinor):\n semiMajor = float(semiMajor)\n semiMinor = float(semiMinor)\n return math.pi*semiMajor*semiMinor", "def area_of_circle(radius = radious):\n area = radius * radious * 3.142\n print(\"Calculating area...\")\n time.sleep(2)\n return area", "def curvature(self):\n return self.circle().curvature(self.o, self.r, p = self.a)" ]
[ "0.7989176", "0.6924032", "0.68412817", "0.68127716", "0.6727333", "0.66740966", "0.6610501", "0.6582428", "0.6562776", "0.65454847", "0.6515111", "0.6455083", "0.6424359", "0.6401664", "0.6375693", "0.6350507", "0.6339286", "0.6323839", "0.62589574", "0.6257008", "0.6223934", "0.6204388", "0.6202427", "0.6197281", "0.6184294", "0.6178246", "0.6178191", "0.6172791", "0.6163451", "0.6151035", "0.6138952", "0.6131437", "0.61292356", "0.61292356", "0.61255777", "0.6103071", "0.60854155", "0.6083419", "0.60773623", "0.6067941", "0.6062774", "0.60411596", "0.6040244", "0.60371697", "0.602394", "0.60145473", "0.60129666", "0.60128516", "0.60087776", "0.6007604", "0.60011315", "0.5999918", "0.59994334", "0.598933", "0.5989045", "0.5985888", "0.59806705", "0.5972629", "0.59614354", "0.59586275", "0.5952205", "0.5946704", "0.59341466", "0.59300846", "0.59297574", "0.59051365", "0.59029865", "0.58912444", "0.5885863", "0.58843124", "0.5877529", "0.5875424", "0.5870374", "0.58652395", "0.5853384", "0.5852797", "0.58516073", "0.58467513", "0.58333874", "0.5829909", "0.5827486", "0.58262837", "0.5824522", "0.58177406", "0.58154875", "0.58149314", "0.58147055", "0.5808639", "0.5803289", "0.5798101", "0.57884145", "0.57854843", "0.57716805", "0.5769608", "0.57622194", "0.57612044", "0.5758687", "0.5755964", "0.57531744", "0.574594" ]
0.6321363
18
Finds the convex hull of a point set by checking a curve for convexity defects and correcting them.
def __CalculateConvexHull(self, contour): return cv2.convexHull(contour)
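An illustrative sketch of the defect-checking step the query describes, under the assumption that it is done with OpenCV's own primitives: cv2.convexHull(..., returnPoints=False) yields hull indices that cv2.convexityDefects can then scan for concavities; the contour coordinates and the 10 px depth threshold below are invented for the example:

import cv2
import numpy as np

# A concave polygon: the point (50, 50) dents the right edge of a square inward.
contour = np.array([[0, 0], [100, 0], [50, 50], [100, 100], [0, 100]],
                   dtype=np.int32).reshape(-1, 1, 2)

hull_idx = cv2.convexHull(contour, returnPoints=False)  # indices into contour
defects = cv2.convexityDefects(contour, hull_idx)       # rows: start, end, farthest, depth*256

if defects is not None:
    for start, end, farthest, depth in defects.reshape(-1, 4):
        if depth / 256.0 > 10:  # depth is fixed-point; threshold chosen arbitrarily
            print("defect at", tuple(contour[farthest, 0]))

hull_points = cv2.convexHull(contour)  # the corrected, fully convex outline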
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. 
z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper", "def convex(points):\r\n if isinstance(points, np.ndarray):\r\n points = np.unique(points, axis=0)\r\n else:\r\n pts = []\r\n points = [pts.append(i) for i in points if i not in pts] # Remove duplicates\r\n del pts\r\n if len(points) <= 1:\r\n return points\r\n # Build lower hull\r\n lower = []\r\n for p in points:\r\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\r\n lower.pop()\r\n lower.append(p)\r\n # Build upper hull\r\n upper = []\r\n for p in reversed(points):\r\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\r\n upper.pop()\r\n upper.append(p)\r\n #print(\"lower\\n{}\\nupper\\n{}\".format(lower, upper))\r\n return np.array(lower[:-1] + upper) # upper[:-1]) # for open loop\r", "def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. 
z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]", "def give_convex_hull(rand_points):\n return ConvexHull(rand_points)", "def convex_hull(points):\n points = np.array(points)\n hull = ConvexHull(points)\n return points[hull.vertices, :]", "def _FindHull(s: List[sg.Point2], p: sg.Point2, q: sg.Point2, hull_points: List[sg.Point2]):\n if len(s) == 0:\n return\n seg = sg.Segment2(p, q)\n c = max(s, key=lambda point: sg.squared_distance(seg, point))\n hull_points.insert(hull_points.index(p) + 1, c)\n s.remove(c)\n s1, s2 = split_points_triangle(s, (p, q, c))\n _FindHull(s1, p, c, hull_points)\n _FindHull(s2, c, q, hull_points)", "def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)", "def test_conv_full(self):\n\n points = np.array([[1, 4], [2, 1], [3, 2], [3, 3], [3, 5], [4, 2], [5, 1], [5, 3]]) # example of points \n \n cv_hull = convex_hull.convex_hull(points) # convex hull returned by the function \n\n right_conv_hull = np.array([[2, 1], [5, 1], [5, 3], [3, 5], [1, 4], [2, 1] ]) # right convex hull\n self.assertTrue((right_conv_hull == cv_hull).all())", "def convex_hull(self):\n if isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"not implemented for geographical coordinate \"\n \"systems. Project to a projected coordinate system.\")\n\n points = [pt for pt in self]\n\n # Find the lowermost (left?) 
point\n pt0 = points[0]\n idx = 0\n for i, pt in enumerate(points[1:]):\n if (pt.y < pt0.y) or ((pt.y == pt0.y) and (pt.x < pt0.x)):\n pt0 = pt\n idx = i+1\n points.pop(idx)\n\n # Sort CCW relative to pt0, and drop all but farthest of any duplicates\n points.sort(key=lambda pt: pt0.distance(pt))\n points.sort(key=lambda pt: _cvectorgeo.polarangle(pt0.vertex, pt.vertex))\n alpha = -1\n drop = []\n for i,pt in enumerate(points):\n a = _cvectorgeo.polarangle(pt0.vertex, pt.vertex)\n if a == alpha:\n drop.append(i)\n else:\n alpha = a\n\n if len(drop) != 0:\n for i in drop[::-1]:\n points.pop(i)\n\n # initialize convex hull\n if len(points) == 2:\n return Polygon([pt0, points[0], points[1]])\n elif len(points) == 1:\n raise GeometryError(\"convex polygon not defined for two points\")\n else:\n\n S = [pt0, points[0], points[1]]\n for pt in points[2:]:\n while not _cvectorgeo.isleft(S[-2].vertex, S[-1].vertex, pt.vertex):\n S.pop()\n S.append(pt)\n\n return Polygon(S, crs=self.crs)", "def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)", "def convex_hull(l):\n\tpass", "def eddy_floyd(points, side=\"\", p_min=[], p_max=[], show=True, save=False, detailed=True):\n# :param points: the points from which to find the convex hull\n# :param side: if \"up\", we care about the points above the line (p_min,p_max), else, below\n# :param p_min: the point on the left of the line (min = min abscissa)\n# :param p_max: the point on the right of the line\n# :param show: if True, the progress in constructing the hull will be plotted on each iteration in a window\n# :param save: if True, the progress in constructing the hull will be saved on each iteration in a .png file\n# :param detailed: if True, even non convex explored polygons are plotted\n if p_min==[] or p_max==[]:\n #Find the point the most on the left (p_min) and the most on the right (p_max)\n p_min,p_max=points[0],points[0]\n for p in points:\n if p[0]<p_min[0]: p_min=p\n if p[0]>p_max[0]: p_max=p\n\n #Divide the points in 2 subproblems (E2=above line, E1=below line)\n #Remark: p_min and p_max are neither in E2 nore in E1 \n E1,E2=[],[]\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: E1+=[p]\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=\"up\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=\"down\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_min]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return [p_max]+to_be_returned_2+[p_min]+to_be_returned_1\n\n \"\"\"End algorithm ?\"\"\"\n #Find if points remain outside the line (either above if up or below if done)\n end=True\n i=0\n while end and i<len(points):\n p=points[i]\n if side==\"up\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: end=False \n if 
side==\"down\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: end=False \n i+=1\n\n \"\"\"Intermediate case: look for the furthest point and divide the problem in 2 subproblems\"\"\"\n if not end:\n p_extr,dist=p_min,0\n E1,E2=[],[]\n if side==\"up\":\n #Find the furthest point from the line (above)\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems\n for p in points:\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])>0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them separately\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_max]+to_be_returned+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n return to_be_returned \n\n if side==\"down\":\n #Find the furthest point from the line (below) \n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems \n for p in points:\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])<0: E2+=[p]\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])<0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them separately\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_min]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_min]+to_be_returned+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return to_be_returned \n \n \"\"\"End case\"\"\"\n if end:\n return []\n\n \"\"\"None of these cases\"\"\"\n print(\"ERROR\")\n return []", "def convexHull(points):\n points = np.append(points, [[0, 0, 0]], axis=0) # All points plus origin\n hull = ConvexHull(points) # Visible points plus possible origin. 
Use its vertices property.\n\n return hull", "def getHull(x_data, y_data):\n xhull = []\n yhull = []\n if len(x_data) == 0 or len(y_data) == 0:\n return xhull, yhull\n xhull.append(x_data[0])\n yhull.append(y_data[0])\n\n lasthullindex = 0\n\n points = len(y_data)\n while lasthullindex < points - 1:\n slope = (y_data[lasthullindex + 1] - y_data[lasthullindex]) / (\n x_data[lasthullindex + 1] - x_data[lasthullindex])\n currenthullindex = lasthullindex + 1\n currenthully = y_data[lasthullindex]\n\n for i in range(currenthullindex + 1, points):\n extrapolation = currenthully + slope * (x_data[i] - x_data[lasthullindex])\n if y_data[i] < extrapolation:\n slope = ((y_data[i] - y_data[lasthullindex]) / (x_data[i] - x_data[lasthullindex]))\n currenthullindex = i\n\n # Store the hull points to be used for a spline fit\n xhull.append(x_data[currenthullindex])\n yhull.append(y_data[currenthullindex])\n lasthullindex = currenthullindex\n\n return xhull, yhull", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print(\"self.V_bar_list_d******************\", self.V_bar_list_d)\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def convex_hull(L):\r\n CH=list()\r\n if L != []:\r\n P = list(L)\r\n # find the starting point of the algorithm and add it to the convex hull:\r\n ind0 = find_start(P)\r\n CH.append(P.pop(ind0))\r\n # find the next point and add it to the convex hull list CH:\r\n if P != []:\r\n ind1 = next_in_hull(CH[0], np.array([1,0]), P)\r\n CH.append(P.pop(ind1))\r\n # use the hyperplane criterion as function side_points to complete CH:\r\n while P != []:\r\n p = CH[-2]\r\n q = CH[-1]\r\n v = q - p \r\n P = side_points(CH[0], CH[-1] - CH[0], P)\r\n ind = next_in_hull(q, v, P)\r\n if P != []:\r\n CH.append(P.pop(ind))\r\n return CH", "def get_hull_points(self, show_progress):\n if self.points and not self.hull_points:\n self.graham_scan(show_progress)\n print(\"Input: {} points\".format(len(self.points)))\n print(\"Convex hull: {} points\".format(len(self.hull_points)))\n return self.hull_points", "def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False", "def convexHull(hist, start_p = 0, end_p = 99999999, ignore = None):\n start_p = int(round(start_p))\n end_p = int(round(end_p))\n\n if end_p - start_p < 5 and (start_p !=0 or end_p != 99999999):\n return np.array(hist)\n\n hist = np.array(hist)\n\n if end_p > len(hist) :\n end_p = len(hist)\n\n hist_x = list(range(start_p, end_p))\n hist_y = np.array(hist[hist_x], dtype=np.float32)\n\n if len(hist_x) < 5:\n return np.array(hist)\n\n hist_y2 = hist_y.copy()\n if ignore is not None:\n ignore2 = ignore[hist_x]\n hist_y2[ignore2] = int(max(hist_y2)*1.5)\n\n hull_x, hull_y = getHull(hist_x, hist_y2)\n hull = getSubtractedHist(hist_x, hist_y, hull_x, hull_y)\n\n ret = list(np.zeros(start_p))\n ret.extend(hull)\n ret.extend(np.zeros(len(hist)-end_p))\n\n if ignore is not None:\n sub = np.array(ret)\n sub[ignore] = 0\n ret = list(sub)\n\n return ret", "def jarvis_convex_hull(points):\n start_index = np.argmax(points[:, 0]) # Point with the largest x-coordinate\n 
start_point = points[start_index]\n # result = [start_index[:]]\n result = [start_index]\n added_points = {start_index}\n while True:\n for ref_index, ref_point in enumerate(points):\n exit_ = True\n if ref_index == start_index or ref_index in added_points:\n continue\n\n signs = 0\n threshold = len(points) - 2\n for compare_index, compare_point in enumerate(points):\n if compare_index == ref_index or compare_index == start_index:\n continue\n check = compare(start_point, ref_point, compare_point)\n if abs(check) < 1e-2:\n dist_start_ref = distance(start_point, ref_point)\n dist_start_compare = distance(start_point, compare_point)\n if dist_start_compare > dist_start_ref:\n threshold = threshold + 1\n else:\n threshold = threshold - 1\n continue\n signs = signs + 1 if check > 0 else signs - 1\n\n if abs(signs) < threshold:\n continue\n\n exit_ = False\n result.append(ref_index)\n added_points.add(ref_index)\n start_index = ref_index\n break\n\n if exit_:\n return result", "def hull_convex(ob, me, selected_only, precision = 0.1):\n # find convex hull\n vertices, triangles = pyffi.utils.quickhull.qhull3d(\n [tuple(v.co) for v in me.verts if v.sel or not selected_only],\n precision = precision)\n # create convex mesh\n box = Blender.Mesh.New('convexpoly')\n for vert in vertices:\n box.verts.extend(*vert)\n for triangle in triangles:\n box.faces.extend(triangle)\n # link mesh to scene and set transform\n scn = Blender.Scene.GetCurrent()\n boxob = scn.objects.new(box, 'convexpoly')\n boxob.setMatrix(ob.getMatrix('worldspace'))\n # set bounds type\n boxob.drawType = Blender.Object.DrawTypes['BOUNDBOX']\n boxob.rbShapeBoundType = 5 # convex hull shape not in blender Python API; Blender.Object.RBShapes['CONVEXHULL']?\n boxob.drawMode = Blender.Object.DrawModes['WIRE']", "def isConvexApproximate(data, boundaryPointsDict, triangleDict, approximation, tolerance):\n outliersAllowed = int(np.floor(tolerance * len(list(boundaryPointsDict.keys()))))\n\n outliersCount = 0\n # This loops over the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least one of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag before the triangles', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we don't need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= approximation:\n PntonConvexFlag = True\n break\n # If at the end of the loop the flag is still false, it means that the particular point is not on any of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n outliersCount += 1\n if outliersCount > outliersAllowed:\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hull and the shape is convex\n plotDemoIfConvex()\n return True", "def QHull(points: List[sg.Point2]) -> List[sg.Segment2]:\n point_list = copy.copy(points)\n hull_points = []\n 
points.sort(key=lambda point: point.x())\n mn = points[0]\n mx = points[-1]\n hull_points.append(mn)\n hull_points.append(mx)\n point_list.remove(mn)\n point_list.remove(mx)\n seg = sg.Segment2(mn, mx)\n # a line between the left most and right most point\n s1, s2 = split_points(point_list, seg)\n _FindHull(s1, mn, mx, hull_points)\n _FindHull(s2, mx, mn, hull_points)\n return points_to_segment(hull_points)", "def isConvex(data, boundaryPointsDict, triangleDict, approximation, demo=True):\n\n # This loops over the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least one of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag before the triangles', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we don't need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= approximation:\n PntonConvexFlag = True\n break\n # If at the end of the loop the flag is still false, it means that the particular point is not on any of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n if demo:\n plotDemo(data, point, bdrPntIdx)\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hull and the shape is convex\n plotDemoIfConvex()\n return True", "def convexify(domain):\n\n if isinstance(domain, isl.BasicSet):\n return domain\n\n dom_bsets = domain.get_basic_sets()\n if len(dom_bsets) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n domain = domain.coalesce()\n\n dom_bsets = domain.get_basic_sets()\n if len(domain.get_basic_sets()) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n dom_bsets = domain.get_basic_sets()\n assert len(dom_bsets) > 1\n\n print(\"PIECES:\")\n for dbs in dom_bsets:\n print(\" %s\" % (isl.Set.from_basic_set(dbs).gist(domain)))\n raise NotImplementedError(\"Could not find convex representation of set\")", "def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):\n\texists = os.path.isdir('plots')\n\tif not exists: \n\t\tos.mkdir('plots')\n\n\n\tfor each in points:\n\t\tplt.plot(each[0],each[1],'o-')\n\n\tif hull_points is not None:\n\t\thull_pt_list = []\n\t\tfor each in hull_points:\n\t\t\thull_pt_list.append(list(each))\n\n\t\thull_pt_arr = np.asarray(hull_pt_list)\n\t\t# print(hull_pt_arr)\n\t\tplt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')\n\t\tfirst_coord = hull_pt_arr[0,:].reshape(1,2)\n\t\tlast_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)\n\n\t\tlast_coord_arr = np.append(first_coord, last_coord, axis = 0)\n\t\tplt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')\n\t\tplt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' 
s\\n'+'N='+str(size))\n\t\n\tplt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')\n\tplt.show()", "def teselado(self,points):\n # sample the whole space of the hull at regular intervals to obtain the decision boundaries\n #get_hull\n area = boundingbox(points)\n #(min_x,min_y),(max_x,min_y),(max_x,max_y),(min_x,max_y)\n #sample inside hull\n lat_sample = np.arange(min_y, max_y, 0.001).tolist()\n lon_sample = np.arange(min_x,max_x, 0.001)\n sampling_space = [[x,y] for x in lat_sample for y in lon_sample]\n sampling_space = np.asarray(sampling_space, dtype=np.float32)\n prediction = kmeans_instance.predict(sampling_space)\n pol = [[list(sampling_space[index]) for index in [i for i, j in enumerate(prediction) if j == k]] for k in range(centers)]\n hull = [shapely.geometry.MultiPoint(pol[i]).convex_hull.exterior._get_coords() for i in range(len(pol))]", "def concave_hull(hull:list, points:list, max_iterations:int=None, min_length_fraction:float=0, min_angle:float=90)->list:\n tweet.info(\"Creating concave hull; minimum side length {}% of average, minimum_angle {}\".format(min_length_fraction * 100, min_angle))\n test_points = set(points)\n ignore_points = []\n avg_sqr_distance = 0\n for k in range(0, len(hull)-1):\n avg_sqr_distance += point_sqr_distance(hull[k], hull[k+1])\n test_points.remove(hull[k])\n avg_sqr_distance /= len(hull) - 1\n min_sqr_length = avg_sqr_distance * (min_length_fraction ** 2) # since we get sqr_length, we square the fraction\n min_cosine = math.cos(math.radians(min_angle))\n \n while (max_iterations is None or max_iterations > 0) and test_points:\n selection, edge = select_longest_edge(hull, ignore_points, min_sqr_length)\n tweet.info(\"Considering edge {}; {} points left\".format(edge, len(test_points)))\n if selection is None:\n break\n selected_point = select_candidate_point(edge, test_points, hull, min_cosine)\n if selected_point is None:\n # This edge has no more candidate points, so we ignore it in the next pass\n ignore_points.append(edge[0])\n tweet.debug(\"No candidate point found.\")\n continue\n tweet.debug(\"Found point {}, inserting new edge.\".format(selected_point))\n if not max_iterations is None:\n max_iterations -= 1\n # We add the point into the concave hull\n hull.insert(selection + 1, selected_point)\n test_points.remove(selected_point)\n return hull", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def _is_p_inside_points_hull(points, p):\n\n from scipy.spatial import ConvexHull\n\n hull = ConvexHull(points)\n new_points = np.append(points, p, axis=0)\n new_hull = ConvexHull(new_points)\n if list(hull.vertices) == list(new_hull.vertices):\n return True\n else:\n return False", "def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)", "def _convex_hull_side(image, start, end):\n\n convex_points = [start]\n\n x_start, y_start = start\n x_end, y_end = end\n\n side = (x_start <= x_end, y_start <= y_end)\n\n\n ranges = {\n (True, True): [\n [x_start + 1, x_end + 1],\n [y_start, y_end + 1],\n False\n ],\n (False, True): [\n [y_start + 1, y_end + 1],\n [x_start, x_end - 1, -1],\n True\n ],\n (False, False): [\n [x_start - 1, x_end - 1, -1],\n [y_start, y_end - 1, -1],\n False\n ],\n (True, False): [\n [y_start - 1, y_end - 1, -1],\n [x_start, x_end + 1],\n True\n ]\n }\n\n prev = 0\n\n 
for outer in range(*ranges[side][0]):\n\n curr_pixel = None\n\n for inner in range(*ranges[side][1]):\n if ranges[side][2] and image[outer, inner] == 0:\n curr_pixel = (inner, outer)\n break\n elif not ranges[side][2] and image[inner, outer] == 0:\n curr_pixel = (outer, inner)\n break\n\n if curr_pixel is None:\n continue\n\n while True:\n # slope infinite for first point\n prev_slope = (\n float(\"-inf\") if prev == 0\n else slope(\n convex_points[prev - 1],\n convex_points[prev],\n ranges[side][2]))\n\n # remove previous point if it yields concave segment\n if prev_slope > slope(\n convex_points[prev],\n curr_pixel,\n ranges[side][2]\n ):\n convex_points.pop(prev)\n prev -= 1\n # add point to hull if it yields convex segment\n else:\n convex_points.append(curr_pixel)\n prev += 1\n break\n\n return convex_points[1:]", "def test_convexHullFacetArea(self):\n try:\n import pyhull\n except ImportError:\n self.skipTest(\"Pyhull (optional) is not available so cannot compute facet area.\")\n \n # make points\n N = 8\n pts = [0, 0, 0,\n 3, 0, 0,\n 0, 3, 0,\n 0, 0, 3,\n 3, 3, 0,\n 0, 3, 3,\n 3, 0, 3,\n 3, 3, 3]\n \n # calc volume\n volume, facetArea = clusters.findConvexHullVolume(N, pts)\n \n self.assertAlmostEqual(facetArea, 54.0)", "def final_check(points):\n for p in points:\n constraints = p.constraints\n if len(p.lies_on) == 1:\n if [x for x in p.lies_on][0].symify() is not None:\n p.x, p.y = [x for x in p.lies_on][0].arbitrary_point()\n return p\n else:\n return None\n elif len(p.lies_on) > 1:\n symified_constraints = []\n # Get all of the constraints that we have processed and given\n # locations to\n for x in p.lies_on:\n tmp = x.symify()\n if tmp is not None:\n symified_constraints.append(tmp)\n # Ensure that we have at least two constraints\n # (to define an intersection); if not go to the next point\n if len(symified_constraints) >= 2:\n # Compute the intersection\n intersection = sympy.intersection(*symified_constraints)\n if intersection:\n p.x = float(intersection[0].x)\n p.y = float(intersection[0].y)\n return p\n else:\n return None\n\n if all([type(c) == primitives.Line for c in constraints]):\n p.x = random.uniform(-1, 1)\n p.y = random.uniform(-1, 1)\n return p\n elif [type(c) for c in constraints].count(primitives.Circle) == 1:\n for c in constraints:\n if type(c) == primitives.Circle:\n circle = c\n break\n if circle.symify() is not None:\n p.x, p.y = circle.arbitrary_point()\n return p\n return None", "def _quickhull(self, pt1, pt2, point_list):\n if not point_list:\n return []\n pt3 = max(point_list, key=lambda p: oriented_area(pt1, p, pt2))\n # No need to divide by abs(pt2-pt1).\n list1 = self._points_on_the_right(pt1, pt3, point_list)\n list2 = self._points_on_the_right(pt3, pt2, point_list)\n return (self._quickhull(pt1, pt3, list1) + [pt3]\n + self._quickhull(pt3, pt2, list2))", "def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def check_convexity(hull, used_pivots):\n for instance in used_pivots:\n if not check_inside_hull(hull, instance):\n return False\n return True", "def smallest_ellipse(points, tol = 0.001):\n points = np.asmatrix(points)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n \n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += 
step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d\n return np.asarray(A), np.squeeze(np.asarray(c))", "def _check_curve(layer: ogr.Layer) -> None:\n # Check if the feature geometry is polygonal:\n feature_defn = layer.GetLayerDefn()\n layer.ResetReading()\n feature = layer.GetNextFeature()\n while feature is not None:\n geom = feature.GetGeometryRef()\n name_wkt = geom.ExportToWkt()\n\n # Approximate a curvature by a polygon geometry:\n if 'curv' in name_wkt.lower():\n linear_geom = geom.GetLinearGeometry()\n new_feature = ogr.Feature(feature_defn)\n new_feature.SetGeometryDirectly(linear_geom)\n layer.CreateFeature(new_feature)\n layer.DeleteFeature(feature.GetFID())\n\n feature = layer.GetNextFeature()", "def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)", "def main():\n points = np.array(\n [[1, 1], [2, 5], [3, 2], [4, 4], [5, 2], [6, 3], [2, 3], [3, 4], [5, 3]]\n )\n hull = graham_scan(points)\n hull = np.concatenate((hull, [hull[0]]))\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(points[:, 0], points[:, 1])\n ax.plot(hull[:, 0], hull[:, 1], 'r')\n ax.set_title('Convex Hull using Graham Scan')\n plt.show()", "def find_convex_hull(ctx: Context):\n if ctx.contours is None or len(ctx.contours) == 0:\n return 0\n\n max_defects = 0\n defects = None\n contour = None\n for c in ctx.contours:\n ctx.hull = cv2.convexHull(c, False, False, False)\n if ctx.hull is not None:\n d = cv2.convexityDefects(c, ctx.hull, None)\n if d is not None:\n if len(d) <= max_defects:\n continue\n ctx.num_defects = len(d)\n max_defects = ctx.num_defects\n defects = d\n contour = c\n\n if defects is None:\n return\n\n # calculate hand center via mean of defect depth points\n x = 0\n y = 0\n for d in defects:\n depth_point = contour[d[0][2]]\n x += depth_point[0][0]\n y += depth_point[0][1]\n\n x = int(x / len(defects))\n y = int(y / len(defects))\n ctx.hand_center = (x, y)\n\n # calculate hand radius as mean of distances\n dist = 0\n for d in defects:\n depth_point = contour[d[0][2]]\n dx, dy = depth_point[0][0], depth_point[0][1]\n dist += math.sqrt(math.pow(x - dx, 2) * math.pow(y - dy, 2))\n ctx.hand_radius = int(dist / len(defects))", "def compute_convex_hull_volume(points):\n try:\n return ConvexHull(points).volume\n except:\n return 0", "def decomposing_poly_cut_by_set_op(P, v, w, epsilon=10e-2):\n\n\n\tgetcontext().prec = 28\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_poly_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_poly_cut_as_line: W not on chain\")\n\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\t# Generate pairs of v and w modified by some epsilon amount \n\tv_l_displacements = [distance_to_v+(i*epsilon) for i in [-1, -2, 0]]\n\tv_r_displacements = [(distance_to_v+(i*epsilon))%chain.length for i in [1, 0, 2]]\n\tw_l_displacements = [distance_to_w+(i*epsilon) for i in [-1, -2, 0]]\n\tw_r_displacements = [(distance_to_w+(i*epsilon))%chain.length for i in [1, 0, 2]]\n\n\tdef splice_polygon(dist_v, dist_w):\n\t\t\"\"\"Portion of decomposing_line_cut_by_splicing wihtout points\n\n\t\tFunction for evaluating validity of candidates\n\t\t\"\"\"\n\n\t\tif dist_w >= chain.length or dist_w == 0:\n\n\t\t\tleft_chain, 
right_chain = cut_linestring(chain, dist_v)\n\n\t\t\tp_l = left_chain.coords[:]\n\t\t\tp_r = right_chain.coords[:]\t\t\n\n\t\t\treturn p_l, p_r\n\n\t\tif dist_v >= chain.length or dist_v == 0:\n\n\t\t\tleft_chain, right_chain = cut_linestring(chain, dist_w)\n\n\t\t\tp_l = right_chain.coords[:]\n\t\t\tp_r = left_chain.coords[:]\t\t\n\n\t\t\treturn p_l, p_r\n\n\n\t\tif dist_w%chain.length > dist_v%chain.length:\n\n\t\t\tleft_v_chain, right_v_chain = cut_linestring(chain, dist_v)\n\t\t\tleft_w_chain, right_w_chain = cut_linestring(chain, dist_w)\n\n\t\t\tcommon = LineString(left_w_chain).difference(LineString(left_v_chain))\n\n\t\t\tp_l = left_v_chain.coords[:]+right_w_chain.coords[:-1]\n\t\t\tp_r = common.coords[:]\n\n\t\t\treturn p_l, p_r\n\n\t\telse:\n\n\t\t\tleft_v_chain, right_v_chain = cut_linestring(chain, dist_v)\n\t\t\tleft_w_chain, right_w_chain = cut_linestring(chain, dist_w)\n\n\t\t\tcommon = LineString(left_v_chain).difference(LineString(left_w_chain))\n\n\t\t\tp_l = common.coords[:]\n\t\t\tp_r = left_w_chain.coords[:]+right_v_chain.coords[:-1]\n\n\t\t\treturn p_l, p_r\n\n\t# Check every ring for self-intersection; if the cut is invalid => self-intersection\n\tfound = False\n\tfor i in range(len(v_l_displacements)):\n\t\tfor j in range(len(w_l_displacements)):\n\n\t\t\t# Check if resultant polygons are valid\n\t\t\tp_l, p_r = splice_polygon(v_l_displacements[i], w_r_displacements[j])\n\t\t\tp_l_lr = LinearRing(p_l+[p_l[0]])\n\t\t\tp_r_lr = LinearRing(p_r+[p_r[0]])\n\n\t\t\tif not p_l_lr.is_valid or not p_r_lr.is_valid:\n\t\t\t\tcontinue\n\n\t\t\tp_l, p_r = splice_polygon(v_r_displacements[i], w_l_displacements[j])\n\t\t\tp_l_lr = LinearRing(p_l+[p_l[0]])\n\t\t\tp_r_lr = LinearRing(p_r+[p_r[0]])\n\n\t\t\tif not p_l_lr.is_valid or not p_r_lr.is_valid:\n\t\t\t\tcontinue\n\n\t\t\t# Else, we have a valid candidate cut\n\t\t\tfound = True\n\t\t\tbreak\n\n\t\tif found:\n\t\t\tbreak\n\n\tif not found:\n\t\tprint(\"splice_polygon: No correct cut combination found!\")\n\t\treturn\n\n\tv_l = chain.interpolate(v_l_displacements[i]).coords[:]\n\tv_r = chain.interpolate(v_r_displacements[i]).coords[:]\n\tw_l = chain.interpolate(w_l_displacements[j]).coords[:]\n\tw_r = chain.interpolate(w_r_displacements[j]).coords[:]\n\n\tdef get_verts(v_l, v_r):\n\t\t\"\"\"Function for extracting verts between two points\n\t\t\"\"\"\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points\n\n\t# Find all vertices of the chain between v_l and v_r\n\tv_pts = get_verts(v_l_displacements[i], v_r_displacements[i])\n\tw_pts = get_verts(w_l_displacements[j], w_r_displacements[j])\n\n\tpoly = Polygon(*P)\n\n\tcut_poly = Polygon(v_l+v_pts+v_r+w_l+w_pts+w_r)\n\tcut_poly = poly.intersection(cut_poly)\n\n\tprint(cut_poly)\n\n\tp_l, p_r = poly.difference(cut_poly)\n\n\tprint(p_l)\n\tprint(p_r)", "def visualHull(sils, length):\n result = sils.pop(0).cone(length)\n assert result.pnFacesInPoly()\n i = 0\n for s in sils:\n # print(i)\n assert 
result.pnFacesInPoly()\n result = result.intersection(s.cone(length), True)\n # result.plot()\n i += 1\n return result", "def convex_line_segment(point_list:list, desc_y:bool=False, desc_x:bool=False)->list:\n if len(point_list) < 3:\n return point_list\n line = []\n x_extrema = None\n # Since the list is sorted by x second, the last point is actually the\n # first point of the last block of y values in the list (if more than\n # one coordinate has the minimum y value).\n last_point = point_list[-1]\n test_point = -2\n while point_list[test_point][1] == last_point[1]:\n last_point = point_list[test_point]\n test_point -= 1\n for point in point_list:\n # We end when we get to the last point. Points with the same y-value, but\n # more inside x-value won't be on the polygon.\n if point == last_point: \n break\n # We skip points that are left of the point we have added already.\n if not x_extrema is None:\n if desc_x and x_extrema >= point[0]:\n continue\n elif not desc_x and x_extrema <= point[0]:\n continue\n # If the line is empty, we just add it.\n if not line:\n line.append(point)\n x_extrema = point[0]\n continue\n dir = direction(line[-1], point, last_point)\n if not desc_y == desc_x:\n dir *= -1\n if dir > 0: # if and only if the polygon stays convex by adding this point...\n if len(line) > 1 and collinear(line[-2], line[-1], point):\n # We remove collinear points to match what Graham's scan does.\n del line[-1]\n line.append(point)\n x_extrema = point[0]\n # We end by adding the last point to the list to complete the line.\n line.append(last_point)\n return line", "def test_convex_init(self):\n print(\"Convex_Init\")\n finder = dc.dock.ConvexHullPocketFinder()", "def points_inside_poly(points, all_verts):\n return Path(all_verts, close=True).contains_points(points)", "def convex(self, *args, **kwargs) -> Any:\n pass", "def in_hull(p, hull, border= True, tol = 0 ):\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n \n if border:\n return hull.find_simplex(p,tol = tol)>=0\n else:\n return hull.find_simplex(p,tol = tol)>0", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def construct_convex_hull(vertices: Sequence[Point]) -> Polyhedron:\n coords = np.zeros((len(vertices),3))\n for i,vertex in enumerate(vertices):\n coords[i,:] = vertex.coordinates\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n return polyhedron", "def __plot_convex_hull(self, ax=None) -> None:\n ax.plot(self.points[:, 0], self.points[:, 1], \"o\")\n for simplex in self.hull.simplices:\n ax.plot(self.points[simplex, 0], self.points[simplex, 1], \"k-\")", "def addPoint(self, p):\n p = np.asarray(p)\n idx = len(self.coords)\n # print(\"coords[\", idx,\"] ->\",p)\n self.coords.append(p)\n\n # Search the triangle(s) whose circumcircle contains p\n bad_triangles 
= []\n for T in self.triangles:\n # Choose one method: inCircleRobust(T, p) or inCircleFast(T, p)\n if self.inCircleFast(T, p):\n bad_triangles.append(T)\n\n # Find the CCW boundary (star shape) of the bad triangles,\n # expressed as a list of edges (point pairs) and the opposite\n # triangle to each edge.\n boundary = []\n # Choose a \"random\" triangle and edge\n T = bad_triangles[0]\n edge = 0\n # get the opposite triangle of this edge\n while True:\n # Check if edge of triangle T is on the boundary...\n # if opposite triangle of this edge is external to the list\n tri_op = self.triangles[T][edge]\n if tri_op not in bad_triangles:\n # Insert edge and external triangle into boundary list\n boundary.append((T[(edge+1) % 3], T[(edge-1) % 3], tri_op))\n\n # Move to next CCW edge in this triangle\n edge = (edge + 1) % 3\n\n # Check if boundary is a closed loop\n if boundary[0][0] == boundary[-1][1]:\n break\n else:\n # Move to next CCW edge in opposite triangle\n edge = (self.triangles[tri_op].index(T) + 1) % 3\n T = tri_op\n\n # Remove triangles too near of point p of our solution\n for T in bad_triangles:\n del self.triangles[T]\n del self.circles[T]\n\n # Retriangle the hole left by bad_triangles\n new_triangles = []\n for (e0, e1, tri_op) in boundary:\n # Create a new triangle using point p and edge extremes\n T = (idx, e0, e1)\n\n # Store circumcenter and circumradius of the triangle\n self.circles[T] = self.circumcenter(T)\n\n # Set opposite triangle of the edge as neighbour of T\n self.triangles[T] = [tri_op, None, None]\n\n # Try to set T as neighbour of the opposite triangle\n if tri_op:\n # search the neighbour of tri_op that use edge (e1, e0)\n for i, neigh in enumerate(self.triangles[tri_op]):\n if neigh:\n if e1 in neigh and e0 in neigh:\n # change link to use our new triangle\n self.triangles[tri_op][i] = T\n\n # Add triangle to a temporal list\n new_triangles.append(T)\n\n # Link the new triangles each another\n N = len(new_triangles)\n for i, T in enumerate(new_triangles):\n self.triangles[T][1] = new_triangles[(i+1) % N] # next\n self.triangles[T][2] = new_triangles[(i-1) % N] # previous", "def graham_scan(points):\n\n # Find point with smallest y coordinate\n # If two points have equal y coordinates, select the one with the lower x-coordinate\n smallest = points[0]\n for p in points:\n if p[1] < smallest[1]:\n smallest = p\n elif p[1] == smallest[1]:\n if p[0] < smallest[0]:\n smallest = p\n\n # Sort points by angle over smallest to x-axis\n points.sort(key=lambda x: angle(x, smallest))\n\n # Our stack\n hull = [smallest, points[1]]\n i = 2\n while i < len(points):\n # If the last points and the new point form a counter-clockwise triangle,\n # we need the last point. 
Therefore, push the new point\n if ccw(hull[-2], hull[-1], points[i]) > 0 or len(hull) == 2:\n hull.append(points[i])\n i += 1\n # If the two last points and the new point don't form a counter-clockwise triangle,\n # the we don't need the last point\n else:\n hull.pop()\n return hull", "def optimal_polygon(y, w=0.5, debug=False):\n # Make sure that we use numpy array\n y = np.array(y)\n x = np.arange(len(y))\n\n # Initialization\n y = np.round(y, 6)\n p_plus = (x[0], y[0] + w)\n l_plus = (x[0], y[0] + w)\n r_plus = (x[1], y[1] + w)\n s_plus = {(x[0], y[0] + w): (x[1], y[1] + w)}\n t_plus = {(x[1], y[1] + w): (x[0], y[0] + w)}\n p_minus = (x[0], y[0] - w)\n l_minus = (x[0], y[0] - w)\n r_minus = (x[1], y[1] - w)\n s_minus = {(x[0], y[0] - w): (x[1], y[1] - w)}\n t_minus = {(x[1], y[1] - w): (x[0], y[0] - w)}\n q = []\n i = 2\n\n while i < len(y):\n # Updating CH_plus (convex hull) and CH_minus\n p = (x[i - 1], y[i - 1] + w)\n p_i_plus = (x[i], y[i] + w)\n while (p != p_plus) and _angle(p_i_plus, p, t_plus[p], '+') > np.pi:\n p = t_plus[p]\n s_plus[p] = p_i_plus\n t_plus[p_i_plus] = p\n\n p = (x[i - 1], y[i - 1] - w)\n p_i_minus = (x[i], y[i] - w)\n while (p != p_minus) and _angle(p_i_minus, p, t_minus[p], '-') > np.pi:\n p = t_minus[p]\n s_minus[p] = p_i_minus\n t_minus[p_i_minus] = p\n\n # Check if CH_plus and CH_minus intersect\n if _angle(p_i_plus, l_plus, r_minus, '+') < np.pi:\n q.append((_intersect(l_plus, r_minus, p_plus, p_minus), l_plus, r_minus, p_plus, p_minus))\n p_minus = r_minus\n p_plus = _intersect(l_plus, r_minus, (x[i - 1], y[i - 1] + w), p_i_plus)\n s_plus[p_plus] = p_i_plus\n t_plus[p_i_plus] = p_plus\n r_plus = p_i_plus\n r_minus = p_i_minus\n l_plus = p_plus\n l_minus = p_minus\n while _angle(l_minus, r_plus, s_minus[l_minus], '-') < np.pi:\n l_minus = s_minus[l_minus]\n elif _angle(p_i_minus, l_minus, r_plus, '-') < np.pi:\n q.append((_intersect(l_minus, r_plus, p_minus, p_plus), l_minus, r_plus, p_minus, p_plus))\n p_plus = r_plus\n p_minus = _intersect(l_minus, r_plus, (x[i - 1], y[i - 1] - w), p_i_minus)\n s_minus[p_minus] = p_i_minus\n t_minus[p_i_minus] = p_minus\n r_minus = p_i_minus\n r_plus = p_i_plus\n l_minus = p_minus\n l_plus = p_plus\n while _angle(l_plus, r_minus, s_plus[l_plus], '+') < np.pi:\n l_plus = s_plus[l_plus]\n else:\n # Updating the two seperating and supporting lines\n if _angle(p_i_plus, l_minus, r_plus, '+') < np.pi:\n r_plus = p_i_plus\n while _angle(p_i_plus, l_minus, s_minus[l_minus], '+') < np.pi:\n l_minus = s_minus[l_minus]\n\n if _angle(p_i_minus, l_plus, r_minus, '-') < np.pi:\n r_minus = p_i_minus\n while _angle(p_i_minus, l_plus, s_plus[l_plus], '-') < np.pi:\n l_plus = s_plus[l_plus]\n i += 1\n\n # Add last change point\n a = _intersect(l_plus, r_minus, p_plus, p_minus)\n b = _intersect(l_minus, r_plus, p_minus, p_plus)\n p = ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)\n q.append((p, r_minus, r_plus, p_minus, p_plus))\n\n end_a = _intersect(p, r_plus, p_i_minus, p_i_plus)\n end_b = _intersect(p, r_minus, p_i_minus, p_i_plus)\n end = ((end_a[0] + end_b[0]) / 2, (end_a[1] + end_b[1]) / 2)\n q.append((end, (None, None), (None, None), p_i_minus, p_i_plus))\n\n if debug:\n return np.array(q)\n else:\n return np.array([o[0] for o in q])", "def is_purecomp_hull(grid, simplex):\n dim = grid.shape[0]\n points = grid[:,simplex]\n flags = np.zeros(dim)\n for ind,pt in enumerate(points):\n flags[ind] = is_nzero_comp(dim-1,pt)\n \n if np.sum(flags)==dim:\n return True\n else:\n return False", "def convex_hull(image):\n\n corners = 
find_corners(image)\n\n\n vertices = [corners[0]]\n\n for i in range(len(corners)):\n vertices.extend(\n _convex_hull_side(\n image, corners[i], corners[(i + 1) % len(corners)]))\n\n return vertices", "def projection_iterhull(poly1, new_dim, max_iter=1000,\n verbose=0, abs_tol=ABS_TOL):\n r, xc = cheby_ball(poly1)\n org_dim = poly1.A.shape[1]\n logger.debug(\"Starting iterhull projection from dim \" +\n str(org_dim) + \" to dim \" + str(len(new_dim)))\n if len(new_dim) == 1:\n f1 = np.zeros(poly1.A.shape[1])\n f1[new_dim] = 1\n sol = lpsolve(f1, poly1.A, poly1.b)\n if sol['status'] == 0:\n vert1 = sol['x']\n else:\n raise RuntimeError((\n '`polytope.solvers.lpsolve` returned: {v}\\n'\n 'its docstring describes return values'\n ).format(\n v=sol))\n sol = lpsolve(np.negative(f1), poly1.A, poly1.b)\n if sol['status'] == 0:\n vert2 = sol['x']\n else:\n raise RuntimeError((\n '`polytope.solvers.lpsolve` returned: {v}\\n'\n 'its docstring describes return values'\n ).format(\n v=sol))\n vert = np.vstack([vert1, vert2])\n return qhull(vert, abs_tol=abs_tol)\n else:\n OK = False\n cnt = 0\n Vert = None\n while not OK:\n # Maximizing in random directions\n # to find a starting simplex\n cnt += 1\n if cnt > max_iter:\n raise Exception(\"iterative_hull: \"\n \"could not find starting simplex\")\n f1 = np.random.rand(len(new_dim)).flatten() - 0.5\n f = np.zeros(org_dim)\n f[new_dim] = f1\n sol = lpsolve(np.negative(f), poly1.A, poly1.b)\n xopt = np.array(sol['x']).flatten()\n if Vert is None:\n Vert = xopt.reshape(1, xopt.size)\n else:\n k = np.nonzero(Vert[:, new_dim[0]] == xopt[new_dim[0]])[0]\n for j in new_dim[range(1, len(new_dim))]:\n ii = np.nonzero(Vert[k, j] == xopt[j])[0]\n k = k[ii]\n if k.size == 0:\n break\n if k.size == 0:\n Vert = np.vstack([Vert, xopt])\n if Vert.shape[0] > len(new_dim):\n u, s, v = np.linalg.svd(\n np.transpose(Vert[:, new_dim] - Vert[0, new_dim]))\n rank = np.sum(s > abs_tol * 10)\n if rank == len(new_dim):\n # If rank full we have found a starting simplex\n OK = True\n logger.debug(\"Found starting simplex after \" +\n str(cnt) + \" iterations\")\n cnt = 0\n P1 = qhull(Vert[:, new_dim], abs_tol=abs_tol)\n HP = None\n while True:\n # Iteration:\n # Maximaze in direction of each facet\n # Take convex hull of all vertices\n cnt += 1\n if cnt > max_iter:\n raise Exception(\"iterative_hull: \"\n \"maximum number of iterations reached\")\n logger.debug(\"Iteration number \" + str(cnt))\n for ind in range(P1.A.shape[0]):\n f1 = np.round(P1.A[ind, :] / abs_tol) * abs_tol\n f2 = np.hstack([np.round(P1.A[ind, :] / abs_tol) * abs_tol,\n np.round(P1.b[ind] / abs_tol) * abs_tol])\n # See if already stored\n k = np.array([])\n if HP is not None:\n k = np.nonzero(HP[:, 0] == f2[0])[0]\n for j in range(1, np.shape(P1.A)[1] + 1):\n ii = np.nonzero(HP[k, j] == f2[j])[0]\n k = k[ii]\n if k.size == 0:\n break\n if k.size == 1:\n # Already stored\n xopt = HP[\n k,\n range(\n np.shape(P1.A)[1] + 1,\n np.shape(P1.A)[1] + np.shape(Vert)[1] + 1)\n ]\n else:\n # Solving optimization to find new vertex\n f = np.zeros(poly1.A.shape[1])\n f[new_dim] = f1\n sol = lpsolve(np.negative(f), poly1.A, poly1.b)\n if sol['status'] != 0:\n logger.error(\"iterhull: LP failure\")\n continue\n xopt = np.array(sol['x']).flatten()\n add = np.hstack([f2, np.round(xopt / abs_tol) * abs_tol])\n # Add new half plane information\n # HP format: [ P1.Ai P1.bi xopt]\n if HP is None:\n HP = add.reshape(1, add.size)\n else:\n HP = np.vstack([HP, add])\n Vert = np.vstack([Vert, xopt])\n logger.debug(\"Taking convex 
hull of new points\")\n P2 = qhull(Vert[:, new_dim], abs_tol=abs_tol)\n logger.debug(\"Checking if new points are inside convex hull\")\n OK = 1\n for i in range(np.shape(Vert)[0]):\n if not P1.contains(np.transpose([Vert[i, new_dim]]),\n abs_tol=abs_tol):\n # If all new points are inside\n # old polytope -> Finished\n OK = 0\n break\n if OK == 1:\n logger.debug(\"Returning projection after \" +\n str(cnt) + \" iterations\\n\")\n return P2\n else:\n # Iterate\n P1 = P2", "def convex_hull_model(self, _start='random', _direction='random'):\n steps = [{'Tour': [], 'Tourlength': 0}]\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n\n if nodes:\n # Step 1: Sketch the connections between adjacent boundary\n # points of the convex hull.\n # hull is a list of ids, not nodes, the hull is always generated CW\n hull = tsputil.convex_hull_helper(nodes)\n # Step 2: Select a starting point and a direction (randomly).\n # start is an id not a node\n startinfo = get_direction_and_start(nodes, _start, _direction)\n start = startinfo[0]\n # if direction is ccw, reverse hull\n if not startinfo[1] == 1:\n hull.reverse()\n\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n\n # Step 3: If the starting point is on the boundary,\n # the starting node is the current node. \"\"\"\n if start in hull:\n # The arc connecting the current node to the adjacent boundary\n # node in the direction of travel is referred to as the\n # current arc.\n cn_index = hull.index(start)\n current_node = hull[cn_index]\n # get adjacent node\n an_index = (cn_index + 1) % (len(hull))\n adjacent_node = hull[an_index]\n # Proceed immediately to Step 4.\"\"\"\n else:\n # If the starting point is not on the boundary, apply the\n # insertion rule to find the closest arc on the boundary. \"\"\"\n closest_arc = find_closest_arc(start, hull, nodes)\n # Connect the starting point to the end node of the closest\n # arc which is in the direction of travel.\n # This node becomes the current node.\"\"\"\n # insert startnode into hull\n hull.insert(hull.index(closest_arc[0]) + 1, start)\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n # update current arc nodes\n current_node = start\n adjacent_node = hull[hull.index(closest_arc[1])]\n # Step 4: Apply the insertion criterion to identify which\n # unconnected interior point is closest to the current arc.\n # repeat step 4 and 5 until all nodes are included in the path\n while len(hull) <= len(nodes):\n while True:\n current_arc = (current_node, adjacent_node)\n # find closest node not in the hull\n interior_node = find_closest_interior_node(current_arc, hull, nodes)\n # Apply the insertion criterion to check whether the\n # closest node is closer to any other arc.\n is_closer = is_closer_to_other_arc(interior_node, current_arc, hull, nodes)\n # If not, proceed to Step 5. If it is, move to the end node of\n # the current arc. This becomes the current node. Repeat\n # Step 4.\n if not is_closer:\n break\n else:\n current_node = current_arc[1]\n an_index = (hull.index(current_node) + 1) % (len(hull))\n adjacent_node = hull[an_index]\n # Step 5: Insert the closest node. 
The connection between the\n # current node and the newly inserted node becomes the current arc.\n # Retaining the current node, return to Step 4 and repeat Steps 4 and\n # 5 until a complete tour is obtained\"\"\"\n hull.insert(hull.index(current_node) + 1, interior_node)\n adjacent_node = interior_node\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n\n self._datacontroller.commit_change('pathsteps', steps)\n self._datacontroller.commit_change('path', steps[-1])", "def validate_clockwise_points(points):\n \n if len(points) != 8:\n raise Exception(\"Points list not valid.\" + str(len(points)))\n \n point = [\n [int(points[0]) , int(points[1])],\n [int(points[2]) , int(points[3])],\n [int(points[4]) , int(points[5])],\n [int(points[6]) , int(points[7])]\n ]\n edge = [\n ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),\n ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),\n ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),\n ( point[0][0] - point[3][0])*( point[0][1] + point[3][1])\n ]\n \n summatory = edge[0] + edge[1] + edge[2] + edge[3];\n return summatory <= 0", "def graham_scan(points: np.ndarray) -> np.ndarray:\n primary, remaining_points = extract_primary(points)\n sorted_points = sort_for_graham_scan(remaining_points, primary)\n hull = find_hull_vertices(sorted_points)\n return hull", "def __draw_curve(self, points):\n x_pts = []\n y_pts = []\n curvex = []\n curvey = []\n self.debug += 1\n for point in points:\n x_pts.append(point[0])\n y_pts.append(point[1])\n curve = scipy.interpolate.interp1d(x_pts, y_pts, 'cubic')\n if self.debug == 1 or self.debug == 2:\n for i in np.arange(x_pts[0], x_pts[len(x_pts) - 1] + 1, 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n else:\n for i in np.arange(x_pts[len(x_pts) - 1] + 1, x_pts[0], 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n return curvex, curvey", "def graham_scan(points):\n if len(points) <= 3:\n return points\n pointList = ExtendedTupleList(points)\n complete_range = pointList.range_within(0, 1)\n first_point = (complete_range[1][\"min\"][1], complete_range[1][\"min\"][0])\n newPoints = ExtendedTupleList([])\n for point in pointList:\n square_dist, cosine = line_length_angle((first_point, point))\n new_point = (point[0], point[1], square_dist, cosine)\n newPoints.append(new_point)\n newPoints.double_sort(3, 2, reverse_outside = True, reverse_inside = True)\n hull = ExtendedTupleList([])\n hull.append(first_point)\n hull.append(newPoints[0])\n lastAngle = newPoints[0][3]\n for k in range(1, len(newPoints)):\n if newPoints[k][3] == lastAngle:\n continue\n lastAngle = newPoints[k][3]\n while (len(hull) >= 2 and direction(hull[-2], hull[-1], newPoints[k]) >= 0):\n hull.pop()\n hull.append(newPoints[k])\n real_hull = []\n for point in hull:\n real_hull.append((point[0], point[1]))\n real_hull.append(real_hull[0])\n return real_hull", "def drawClippedPointSet (self, points, colour):\r\n\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n \r\n for pt in points:\r\n if pt [0] >= 0 and pt [0] < w and pt [1] >= 0 and pt [1] < h:\r\n self.image [pt [1]][pt [0]] = colour", "def refine(self, spline_data, tolerance=170.0, recursions=0):\r\n\r\n # self.spline_data = [coo, u, t, der1, der2, tck]\r\n xx, yy = spline_data[0]\r\n t = spline_data[2]\r\n tck = spline_data[5]\r\n\r\n logger.debug('\\nPoints before refining: {} \\n'.format(len(xx)))\r\n\r\n xn = copy.deepcopy(xx)\r\n yn = copy.deepcopy(yy)\r\n tn = copy.deepcopy(t)\r\n\r\n j = 0\r\n refinements = 
0\r\n first = True\r\n refined = dict()\r\n\r\n for i in range(len(xx) - 2):\r\n refined[i] = False\r\n\r\n # angle between two contour line segments\r\n a = np.array([xx[i], yy[i]])\r\n b = np.array([xx[i + 1], yy[i + 1]])\r\n c = np.array([xx[i + 2], yy[i + 2]])\r\n angle = Utils.angle_between(a - b, c - b, degree=True)\r\n\r\n if angle < tolerance:\r\n\r\n logger.debug('Refining between segments {} {},'\r\n .format(i, i + 1))\r\n logger.debug('Tol={0:5.1f}, Angle={1:05.1f}\\n'\r\n .format(tolerance, angle))\r\n\r\n refined[i] = True\r\n refinements += 1\r\n\r\n # parameters for new points\r\n t1 = (t[i] + t[i + 1]) / 2.\r\n t2 = (t[i + 1] + t[i + 2]) / 2.\r\n\r\n # coordinates of new points\r\n p1 = interpolate.splev(t1, tck, der=0)\r\n p2 = interpolate.splev(t2, tck, der=0)\r\n\r\n # insert points and their parameters into arrays\r\n if i > 0 and not refined[i - 1]:\r\n xn = np.insert(xn, i + 1 + j, p1[0])\r\n yn = np.insert(yn, i + 1 + j, p1[1])\r\n tn = np.insert(tn, i + 1 + j, t1)\r\n j += 1\r\n xn = np.insert(xn, i + 2 + j, p2[0])\r\n yn = np.insert(yn, i + 2 + j, p2[1])\r\n tn = np.insert(tn, i + 2 + j, t2)\r\n j += 1\r\n\r\n if first and recursions > 0:\r\n logger.debug('Recursion level: {} \\n'.format(recursions))\r\n first = False\r\n\r\n logger.debug('Points after refining: {}'.format(len(xn)))\r\n\r\n # update coordinate array, including inserted points\r\n spline_data[0] = (xn, yn)\r\n # update parameter array, including parameters of inserted points\r\n spline_data[2] = tn\r\n\r\n # this is the recursion :)\r\n if refinements > 0:\r\n self.refine(spline_data, tolerance, recursions + 1)\r\n\r\n # stopping from recursion if no refinements done in this recursion\r\n else:\r\n # update derivatives, including inserted points\r\n spline_data[3] = interpolate.splev(tn, tck, der=1)\r\n spline_data[4] = interpolate.splev(tn, tck, der=2)\r\n\r\n logger.debug('No more refinements.')\r\n logger.debug('\\nTotal number of recursions: {}'\r\n .format(recursions - 1))\r\n\r\n # due to recursive call to refine, here no object can be returned\r\n # instead use self to transfer data to the outer world :)\r\n self.spline_data = copy.deepcopy(spline_data)\r\n return", "def free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def polyhedra_from_xyz(xyz_file: str,\n try_convex_hull: bool = True)\\\n -> Tuple[List[Polyhedron],\\\n List[VertexCollection],\\\n List[Union[Polyhedron,VertexCollection]]]:\n\n 
object_coordinates_dict: Dict[str, List[List[float]]] = {}\n polyhedron_list: List[Polyhedron] = []\n vertex_collection_list: List[VertexCollection] = []\n object_list: List[Union[Polyhedron,VertexCollection]] = []\n type_order: List[str] = []\n with open(xyz_file, 'r') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n line = lines[i].strip()\n if i == 0:\n l = re.search(\"\\d+$\", line)\n assert l is not None\n n_points = int(l.group())\n elif i == 1:\n l = re.search(\"\\d+$\", line)\n assert l is not None\n dim = int(l.group())\n assert dim <= 3, 'We cannot visualise the fourth dimension and\\\n above.'\n else:\n if line == '':\n continue\n l = re.search(\"([A-Za-z]+[0-9]*)[\\s\\t]+\", line)\n assert l is not None\n point_type = l.group(1)\n l2 = re.findall(\"[+-]?\\d+\\.\\d*\", line)\n point_coordinates = []\n for coordinate in l2:\n point_coordinates.append(float(coordinate))\n assert len(point_coordinates) == dim\n if point_type not in object_coordinates_dict:\n object_coordinates_dict[point_type] = []\n object_coordinates_dict[point_type].append(point_coordinates)\n if point_type not in type_order:\n type_order.append(point_type)\n\n for point_type in type_order:\n object_coordinates = np.array(object_coordinates_dict[point_type])\n if try_convex_hull:\n try:\n print(\"Attempting to construct a convex hull for {}...\"\\\n .format(point_type))\n polyhedron = construct_convex_hull_from_coords\\\n (object_coordinates)\n polyhedron_list.append(polyhedron)\n object_list.append(polyhedron) \n except:\n print(\"Failed to construct a convex hull for {}.\"\\\n .format(point_type))\n print(\"Falling back to vertex collection for {}...\"\\\n .format(point_type))\n vertex_collection = construct_vertex_collection_from_coords\\\n (object_coordinates, 2)\n vertex_collection_list.append(vertex_collection)\n object_list.append(vertex_collection) \n else:\n print(\"Constructing a vertex collection for {}...\"\\\n .format(point_type))\n vertex_collection = construct_vertex_collection_from_coords\\\n (object_coordinates, 2)\n vertex_collection_list.append(vertex_collection)\n object_list.append(vertex_collection) \n\n return polyhedron_list,vertex_collection_list,object_list", "def try_update_points(self, points, s, t):\n\n p0, p1, p2, p3 = points\n p1_ = Vector(p0.x+s*(p1.x-p0.x), p0.y+s*(p1.y-p0.y), True)\n p2_ = Vector(p3.x+t*(p2.x-p3.x), p3.y+t*(p2.y-p3.y), True)\n perturbed_points = [p0, p1_, p2_, p3]\n if self.triangle_error_of(perturbed_points, easy_only=False) is not None:\n return None\n\n return p1_, p2_", "def poly_enclose(points, color, inc=1.2, rad=0.3, lw=2):\n points = np.log(points)\n hull = ConvexHull(points)\n\n cent = np.mean(points, 0)\n pts = []\n for pt in points[hull.simplices]:\n pts.append(pt[0].tolist())\n pts.append(pt[1].tolist())\n \n pts.sort(key=lambda p: np.arctan2(p[1] - cent[1],\n p[0] - cent[0]))\n pts = pts[0::2] # Deleting duplicates\n pts.insert(len(pts), pts[0])\n \n \n verts = inc*(np.array(pts)- cent) + cent\n verts2 = np.zeros((3*verts.shape[0]-2,2))\n verts2[0::3] = verts\n verts2[1::3,:] = (1-rad)*verts[0:-1,:] + rad*verts[1:,:]\n verts2[2::3,:] = rad*verts[0:-1,:] + (1-rad)*verts[1:,:]\n verts2[0:-1] = verts2[1:]\n verts2[-1] = verts2[0]\n\n\n \n codes = [Path.MOVETO, Path.LINETO, Path.CURVE3,]\n for j in range(len(pts)-2):\n codes.extend([Path.CURVE3, Path.LINETO, Path.CURVE3,])\n codes.append(Path.CURVE3)\n \n \n path = Path(verts2, codes)\n patch = patches.PathPatch(path, facecolor=color, lw=0, alpha=0.2)\n edge = patches.PathPatch(path, 
edgecolor=color, facecolor='none', lw=lw)\n patch._path._vertices = np.exp(patch._path._vertices)\n return patch, edge", "def convex_hull_intersection(p1, p2):\n inter_p = polygon_clip(p1,p2)\n if inter_p is not None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0", "def convex_hull_intersection(p1, p2):\n inter_p = polygon_clip(p1,p2)\n if inter_p is not None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0", "def convex_hull_intersection(p1, p2):\n inter_p = polygon_clip(p1, p2)\n if inter_p is not None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0", "def constrain(obj_dict):\n\n point_set = set()\n for obj in obj_dict['point'].values():\n if obj.x is None and obj.y is None:\n if obj.random is True:\n obj.x = random.uniform(-1, 1)\n obj.y = random.uniform(-1, 1)\n else:\n point_set.add(obj)\n\n i = 0\n points = [y for y in point_set]\n\n old_len = None\n\n while points:\n if i == len(points):\n # Ensure that something has changed\n if old_len == len(points):\n p = final_check(points)\n if p is None:\n # If nothing has changed, error out\n error(name=\"Underconstrained system\",\n msg=\"Unable to place the following points: \" +\n str([x.name for x in points]))\n else:\n points.remove(p)\n i = 0\n continue\n # Modular arithmetic\n old_len = len(points)\n i = 0\n # Get the current point\n p = points[i]\n\n # If there are no constraints on the point, generate it randomly\n if len(p.constraints) == 0:\n p.x = random.uniform(-1, 1)\n p.y = random.uniform(-1, 1)\n points.remove(p)\n i = 0\n continue\n # If there is one constraint on the point, we can plcae it anywhere\n # on the constraint\n elif len(p.constraints) == 1:\n obj = list(p.constraints)[0]\n # Check the symify constraint because of the fact that arbitrary\n # point calls it\n if obj is None or obj.symify() is None:\n i += 1\n continue\n\n p.x, p.y = obj.arbitrary_point()\n\n # Start the iteration over\n i = 0\n points.remove(p)\n # Otherwise, there are multiple constraints\n else:\n symified_constraints = []\n # Get all of the constraints that we have processed and given\n # locations to\n for x in p.constraints:\n tmp = x.symify()\n if tmp is not None:\n symified_constraints.append(tmp)\n # Ensure that we have at least two constraints\n # (to define an intersection); if not go to the next point\n if len(symified_constraints) < 2:\n i += 1\n continue\n # Compute the intersection\n intersection = sympy.intersection(*symified_constraints)\n\n # If there was no intersection, continue\n if intersection == []:\n i += 1\n continue\n # Otherwise, we have a list of possible intersections, pick\n # one of them randomly to use\n else:\n r = random.randint(0, len(intersection) - 1)\n p.x = float(intersection[r].x)\n p.y = float(intersection[r].y)\n i = 0\n points.remove(p)", "def get_corner_node_prune(lcurve):\r\n rho, eta = np.zeros(len(lcurve)), np.zeros(len(lcurve))\r\n for lia in range(len(lcurve)):\r\n rho[lia] = lcurve[lia][0]\r\n eta[lia] = lcurve[lia][1]\r\n\r\n if len(rho) != len(eta):\r\n raise ValueError(\"both arrays must have the same size\")\r\n fin = np.isfinite(rho + eta)\r\n nzr = np.array([False]*len(rho))\r\n nzr[np.nonzero(rho*eta)[0]] = True\r\n keep = fin & nzr\r\n if len(keep) < 1:\r\n raise ValueError(\"To few accepted data found\")\r\n if len(keep) < len(rho):\r\n print(\"I had to trim the data due to NaN/Inf or zero values\")\r\n rho = rho[keep]\r\n eta = 
eta[keep]\r\n if np.any(rho[:-1] < rho[1:]) or np.any(eta[:-1] > eta[1:]):\r\n print(\"Warning: L-curve lacks monotonicity\")\r\n nP = len(rho) # number of points\r\n P = np.log10(np.array([rho, eta])).T # Coordinates of the loglog L-curve\r\n V = P[1:, :] - P[:-1, :] # The vectors defined by these coord\r\n v = np.sqrt(np.sum(V**2, axis=1)) # length of the vectors\r\n # W = V/np.tile(v, (1, 2)); # Normalized vectors.\r\n W = np.zeros(V.shape)\r\n W[:, 0] = V[:, 0]/v\r\n W[:, 1] = V[:, 1]/v\r\n clist = [] # list of condidates\r\n p = np.min([5, nP]) # number of vectors in pruned L-curve\r\n # convex = 0 # Are the pruned L-curves convex\r\n Ind = np.argsort(v)[::-1] # Sort lengths decending\r\n while p < (nP-1)*2:\r\n elmts = np.sort(Ind[:np.min([p, nP-1])])\r\n candidate = Angles(W[elmts, :], elmts)\r\n # print(\"candidate p={}, {}\".format(p, candidate))\r\n if candidate not in clist:\r\n clist.append(candidate)\r\n candidate = Global_Behaviour(P, W[elmts, :], elmts)\r\n if candidate not in clist:\r\n clist.append(candidate)\r\n p = p*2\r\n # print(clist)\r\n if 0 not in clist:\r\n clist.insert(0, 0)\r\n clist = np.sort(clist)\r\n\r\n vz = np.argwhere(np.diff(P[clist, 1]) >= np.abs(np.diff(P[clist, 0])))\r\n if len(vz) > 1:\r\n if vz[0] == 0:\r\n vz = vz[1:]\r\n elif len(vz) == 1:\r\n if vz[0] == 0:\r\n vz = []\r\n if vz == [] or len(vz) == 0:\r\n # if vz.size <= 0:\r\n index = clist[-1]\r\n else:\r\n vects = np.array([P[clist[1:], 0] - P[clist[:-1], 0], P[clist[1:], 1] - P[clist[:-1], 1]]).T\r\n vects = np.dot(np.diag(1/np.sqrt(np.sum(vects**2, 1))), vects)\r\n delta = vects[:-1, 0] * vects[1:, 1] - vects[1:, 0] * vects[:-1, 1]\r\n vv = np.argwhere(delta[vz-1] <= 0)\r\n # print(vv)\r\n # print(vz)\r\n if vv == [] or len(vv) == 0:\r\n # if vv.size <= 0:\r\n index = clist[vz[-1]]\r\n else:\r\n index = clist[vz[vv[0]]]\r\n\r\n try:\r\n retval = int(index)\r\n except TypeError:\r\n print(\"index!!!!: {}\".format(index))\r\n retval = int(index[0])\r\n return retval", "def validate_clockwise_points(points):\n\n if len(points) != 8:\n raise Exception(\"Points list not valid.\" + str(len(points)))\n\n point = [\n [int(points[0]), int(points[1])],\n [int(points[2]), int(points[3])],\n [int(points[4]), int(points[5])],\n [int(points[6]), int(points[7])]\n ]\n edge = [\n (point[1][0] - point[0][0]) * (point[1][1] + point[0][1]),\n (point[2][0] - point[1][0]) * (point[2][1] + point[1][1]),\n (point[3][0] - point[2][0]) * (point[3][1] + point[2][1]),\n (point[0][0] - point[3][0]) * (point[0][1] + point[3][1])\n ]\n\n summatory = edge[0] + edge[1] + edge[2] + edge[3];\n if summatory > 0:\n raise Exception(\n \"Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. 
Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.\")", "def test_get_convex_hull(self):\n\n these_vertex_indices = skeleton_lines._get_convex_hull(\n vertex_x_coords=VERTEX_X_COORDS[END_NODE_VERTEX_INDICES],\n vertex_y_coords=VERTEX_Y_COORDS[END_NODE_VERTEX_INDICES])\n\n expected_indices = numpy.linspace(\n 0, len(END_NODE_VERTEX_INDICES) - 1,\n num=len(END_NODE_VERTEX_INDICES), dtype=int)\n self.assertTrue(numpy.array_equal(\n these_vertex_indices, expected_indices))", "def _tValueForPointOnCubicCurve(point, cubicCurve, isHorizontal=0):\n pt1, pt2, pt3, pt4 = cubicCurve\n a, b, c, d = bezierTools.calcCubicParameters(pt1, pt2, pt3, pt4)\n solutions = bezierTools.solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],\n d[isHorizontal] - point[isHorizontal])\n solutions = [t for t in solutions if 0 <= t < 1]\n if not solutions and not isHorizontal:\n # can happen that a horizontal line doens intersect, try the vertical\n return _tValueForPointOnCubicCurve(point, (pt1, pt2, pt3, pt4), isHorizontal=1)\n if len(solutions) > 1:\n intersectionLenghts = {}\n for t in solutions:\n tp = _getCubicPoint(t, pt1, pt2, pt3, pt4)\n dist = _distance(tp, point)\n intersectionLenghts[dist] = t\n minDist = min(intersectionLenghts.keys())\n solutions = [intersectionLenghts[minDist]]\n return solutions", "def convex_hull_calculator_mp(arr: np.ndarray, px_per_mm: float) -> float:\n arr = np.unique(arr, axis=0).astype(int)\n if arr.shape[0] < 3:\n return 0\n for i in range(1, arr.shape[0]):\n if (arr[i] != arr[0]).all():\n try:\n return ConvexHull(arr).area / px_per_mm\n except QhullError:\n return 0\n else:\n pass\n return 0", "def convex_hull_intersection(self, p1, p2):\n inter_p = self.polygon_clip(p1,p2)\n if inter_p is not None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0", "def sort_for_graham_scan(points: np.ndarray, primary: np.ndarray) -> np.ndarray:\n point_slopes = np.array([v[1] / v[0] for v in points])\n sorted_indexes = np.argsort(point_slopes)\n sorted_points = np.array(points)[sorted_indexes]\n hull = np.concatenate(\n (sorted_points[-1:], [primary], sorted_points)\n )\n return hull", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def fitEllipseDirect(points):\r\n x = points[:, 0]; y = points[:, 1];\r\n # Build design matrix\r\n D = np.vstack((x*x, x*y, y*y, x, y, np.ones(x.shape)))\r\n # Build scatter matrix\r\n S = D.dot(D.T)\r\n # Build constraint matrix\r\n C = np.zeros((6, 6))\r\n C[0, 2]= +2; C[1, 1]= -1; C[2, 0]= +2;\r\n # Solve generalised eigenvalue system C*a == l*S*a\r\n geval, gevec = linalg.eig(S, C)\r\n # Find the eigenvector with the only pozitive eigenvalue\r\n geval = np.real(geval)\r\n i = np.argmax((geval>0) * np.isfinite(geval))\r\n if not np.isfinite(geval[i]):\r\n raise linalg.LinAlgError(\r\n \"Eigenvalue calculation failed to return a valid answer.\" +\r\n \"\\nEigenvalues:\\n\" + str(geval) + '\\n')\r\n theVec = np.real(gevec[:, i])\r\n # That vector has the parameters of the ellipse\r\n return tuple(theVec.flatten())", "def qhull(vertices, abs_tol=ABS_TOL):\n A, b, vert = quickhull(vertices, abs_tol=abs_tol)\n if A.size == 0:\n return 
Polytope()\n return Polytope(A, b, minrep=True, vertices=vert)", "def test_find_triangle(self):\n points = np.array([[2.435, -3.37], [2.435, -1.82], [2.635, -2.], [2.535, -1.7]])\n connectivity_list = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]], dtype=np.intp)\n point = np.array([2.6, -1.9])\n self.assertEqual(1, find_triangle(point, points, connectivity_list))\n point = np.array([3., 1.]) # outside of defined vertices\n self.assertEqual(-1, find_triangle(point, points, connectivity_list))", "def shrink_vertex(hull_vertices, inside, shrinking_threshold):\n hull = create_hull(hull_vertices)\n hull, max_edge_length = sort_hull(hull)\n avg_edge_length = average_distance(inside)\n\n if max_edge_length < avg_edge_length:\n # mark current hull as released, compute new hull from remaining points\n new_hull_vertices, inside = convex_hull(inside)\n new_hull_vertices, released = shrink_vertex(new_hull_vertices, inside)\n return new_hull_vertices, np.append(released, hull_vertices, axis=0)\n\n all_points = np.append(inside, hull_vertices, axis=0)\n\n if len(all_points) < 3:\n # nothing to shrink\n return hull.vertex, np.zeros((0, 2))\n\n while max_edge_length >= shrinking_threshold * avg_edge_length:\n V1 = hull[0].vertex\n V2 = hull[1].vertex\n V21 = V2 - V1\n V21dot = np.dot(V21, V21)\n\n edges = list(\n zip(hull.vertex[1:],\n np.append(hull.vertex[2:], [hull.vertex[0]], axis=0)))\n\n candidates = []\n for P in all_points:\n # find closest point from x to the line between V1 and V2:\n # 1) its projection falls between V1 and V2\n # 2) it resides on the left of V1 and V2\n # 3) the perpendicular line from P to the line between V1 and V2\n # doesn't have an intersection with other edges between vertices\n\n PV1 = P - V1\n u = np.dot(PV1, V21) / V21dot\n\n if not (0-eps <= u <= 1+eps):\n # 1) failed\n continue\n\n M = np.vstack((np.array([V1, V2, P]).T,[1,1,1]))\n if np.linalg.det(M) <= 0+eps:\n # 2) failed\n continue\n\n # get projected point\n PP = V1 + u*V21\n PPP = PP - P\n\n num_intersections = 0\n for i, edge in enumerate(edges):\n\n if array_equal(P, edge[0]) or array_equal(P, edge[1]):\n # a point always intersects with its own edge\n continue\n\n has_intersec = seg_intersect(P, PPP, edge[0], edge[1]-edge[0])\n if not has_intersec:\n # no intersection with this edge\n continue\n\n # we found an intersection. These are only allowed if the\n # candidate vertex is either the V_last or V3...\n if array_equal(P, hull[-1].vertex) or \\\n array_equal(P, hull[2].vertex):\n continue\n\n # otherwise this is an invalid intersection\n num_intersections += 1\n if num_intersections > 1:\n # only one intersection is allowed at max\n # (see condition below)\n break\n\n if num_intersections == 0 or \\\n num_intersections == 1 and (0-eps <= u <= 0+eps or\n 1-eps <= u <= 1+eps):\n # add point if it has no intersection or the only intersection\n # is at V1 or V2. 
This happens if u == 0 or u == 1.\n candidates.append((P, dist(P, PP)))\n\n if len(candidates) == 0:\n # no candidate for shrinking found\n hull[0].is_processed = True\n\n if all(hull.is_processed):\n # finished search\n break\n else:\n # add closest point to hull between V1 and V2\n Q = min(candidates, key = lambda t: t[1])[0]\n # update edge length\n hull[0].length = dist(V1, Q)\n hull = np.insert(hull, 1, (Q, dist(Q, V2), False), axis=0)\n\n hull, max_edge_length = sort_hull(hull)\n\n # the original releasing has not been implemented -> return an empty array\n return hull.vertex, np.zeros((0, 2))", "def point_in_poly(x_point: float, y_point: float) -> bool:\n\n # Semi-F47 extended states all devices should be able to ride out a sag of up to 1 cycle.\n if x_point <= 1:\n return False\n\n point = shapely.geometry.Point(x_point, y_point)\n return POLYGON.contains(point) or POLYGON.intersects(point)", "def find_crits(self, tol=1e-3, pts_per_curve=1000, \n out_dir='./', out_file=None, load=False, exact=False):\n self._data = dict()\n\n if self.cf.rank == 0 and not os.path.exists('{:s}'.format(out_dir)):\n os.mkdir('{:s}'.format(out_dir))\n if out_file == None:\n out_file = '{:s}/hydro_onset'.format(out_dir)\n\n self.cf = CriticalFinder(self.solve_problem, CW)\n # If no tasks specified, set the atmospheric defaults,\n # find the crits, and store the curves\n self.atmo_kwargs = self._atmo_kwargs\n mins, maxs, ns, logs = [],[],[],[]\n for l in (self._ra_steps, self._kx_steps, self._ky_steps):\n if type(l) == type(None):\n continue\n mins.append(l[0])\n maxs.append(l[1])\n ns.append(l[2])\n logs.append(l[3])\n mins, maxs = np.array(mins), np.array(maxs)\n ns, logs = np.array(ns, dtype=np.int64), np.array(logs)\n if load:\n try:\n self.cf.load_grid('{:s}/{:s}.h5'.format(out_dir, out_file), logs=logs)\n except:\n self.cf.grid_generator(mins, maxs, ns, logs=logs)\n if self.cf.comm.rank == 0:\n self.cf.save_grid('{:s}/{:s}'.format(out_dir, out_file))\n else:\n self.cf.grid_generator(mins, maxs, ns, logs=logs)\n if self.cf.comm.rank == 0:\n self.cf.save_grid('{:s}/{:s}'.format(out_dir, out_file))\n self.cf.root_finder()\n if self.cf.comm.rank == 0:\n self.cf.plot_crit(title= '{:s}/{:s}'.format(out_dir, out_file), xlabel='kx', ylabel='Ra', transpose=True)\n\n if exact:\n crits = self.cf.exact_crit_finder()\n else:\n crits = self.cf.crit_finder()\n\n if len(crits) == 2:\n ra_crit, kx_crit = crits\n if self.cf.rank == 0:\n logger.info('Critical value found at ra: {:.5g}, kx: {:.5g}'.format(ra_crit, kx_crit)) \n elif len(crits) == 3:\n ra_crit, kx_crit, ky_crit = crits\n k_tot = np.sqrt(kx_crit**2 + ky_crit**2)\n if self.cf.rank == 0:\n logger.info('Critical value found at ra: {:.5g}, kx: {:.5g}, ky: {:.5g}, ktot = {:.5g}'.format(ra_crit, kx_crit, ky_crit, k_tot)) \n if self.cf.comm.rank == 0:\n self.cf.save_grid('{:s}/{:s}'.format(out_dir, out_file))\n self.cf.plot_crit(title= '{:s}/{:s}'.format(out_dir, out_file), xlabel='kx', ylabel='Ra', transpose=True)", "def remove_point(self, x):\n\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def remove_point(self, x):\n\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def 
delaunay_triangle_calculation(rect, points):\n # creating the subdiv class\n subdiv = cv2.Subdiv2D(rect)\n\n # Insert points into subdiv class\n for p in points:\n subdiv.insert(p)\n\n triangle_list = subdiv.getTriangleList()\n\n delaunay_tri = []\n pt = []\n\n for t in triangle_list:\n pt.append((t[0], t[1]))\n pt1 = (t[0], t[1])\n\n pt.append((t[2], t[3]))\n pt2 = (t[2], t[3])\n\n pt.append((t[4], t[5]))\n pt3 = (t[4], t[5])\n\n if in_rectangle(rect, pt1) and in_rectangle(rect, pt2) and in_rectangle(rect, pt3):\n ind = []\n # Get face-points (from 68 face detector) by coordinates\n for j in range(0, 3):\n for k in range(0, len(points)):\n if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:\n ind.append(k)\n\n if len(ind) == 3:\n delaunay_tri.append((ind[0], ind[1], ind[2]))\n\n pt = []\n\n return delaunay_tri", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)", "def remove_point(self, x):\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def solve(points):\n # it's not a list\n if not isinstance(points, list):\n raise TypeError(\"solve expects a list of n Point objects, received %s\" % points)\n\n plen = len(points)\n if plen < 2:\n return []\n\n # preallocate a simple map to tell us whether a Point is spanned\n spanned = [False] * plen\n # span the first point\n spanned[0] = True\n edges = []\n result = []\n\n for lkey, left in enumerate(points):\n for rkey, right in enumerate(points):\n #if left != right:\n edges.append((lkey, rkey, edist(left, right)))\n\n edges.sort(key=itemgetter(2))\n\n while len(result) < plen - 1:\n for edge in edges:\n lkey, rkey, _ = edge\n if spanned[lkey] != spanned[rkey]:\n result.append((points[lkey], points[rkey]))\n spanned[lkey] = spanned[rkey] = True\n break\n\n return result", "def intersects(*args):\r\n if len(args) == 2:\r\n p0, p1, p2, p3 = *args[0], *args[1]\r\n elif len(args) == 4:\r\n p0, p1, p2, p3 = args\r\n else:\r\n raise AttributeError(\"Pass 2, 2-pnt lines or 4 points to the function\")\r\n #\r\n # ---- First check ---- np.cross(p1-p0, p3-p2 )\r\n p0_x, p0_y, p1_x, 
p1_y, p2_x, p2_y, p3_x, p3_y = *p0, *p1, *p2, *p3\r\n s10_x = p1_x - p0_x\r\n s10_y = p1_y - p0_y\r\n s32_x = p3_x - p2_x\r\n s32_y = p3_y - p2_y\r\n denom = s10_x * s32_y - s32_x * s10_y\r\n if denom == 0.0:\r\n return False\r\n #\r\n # ---- Second check ---- np.cross(p1-p0, p0-p2 )\r\n den_gt0 = denom > 0\r\n s02_x = p0_x - p2_x\r\n s02_y = p0_y - p2_y\r\n s_numer = s10_x * s02_y - s10_y * s02_x\r\n if (s_numer < 0) == den_gt0:\r\n return False\r\n #\r\n # ---- Third check ---- np.cross(p3-p2, p0-p2)\r\n t_numer = s32_x * s02_y - s32_y * s02_x\r\n if (t_numer < 0) == den_gt0:\r\n return False\r\n #\r\n if ((s_numer > denom) == den_gt0) or ((t_numer > denom) == den_gt0):\r\n return False\r\n #\r\n # ---- check to see if the intersection point is one of the input points\r\n t = t_numer / denom\r\n # substitute p0 in the equation\r\n x = p0_x + (t * s10_x)\r\n y = p0_y + (t * s10_y)\r\n # be careful that you are comparing tuples to tuples, lists to lists\r\n if sum([(x, y) == tuple(i) for i in [p0, p1, p2, p3]]) > 0:\r\n return False\r\n return True", "def get_new_bracket(x1, x2, x3, x4):\n points = [x1, x2, x3]\n dist = float('inf')\n for point in points:\n if abs(x4 - point) < dist and f(point) * f(x4) < 0:\n valid_point = point\n dist = abs(x4 - point)\n return valid_point" ]
[ "0.7381319", "0.7166372", "0.71521324", "0.69881785", "0.69819653", "0.69719875", "0.69375825", "0.68497926", "0.6776819", "0.66407245", "0.6561083", "0.6464603", "0.6462413", "0.6459737", "0.64563394", "0.64457273", "0.6220267", "0.6213466", "0.62115705", "0.61951804", "0.6154982", "0.60987306", "0.60948527", "0.6077064", "0.60763276", "0.6067359", "0.60651207", "0.60634923", "0.60379237", "0.60038215", "0.59809804", "0.58944136", "0.5889525", "0.5882951", "0.58251035", "0.5822965", "0.580944", "0.57877785", "0.57742226", "0.5752774", "0.574566", "0.57341397", "0.5730006", "0.5716814", "0.56696016", "0.5620985", "0.56118727", "0.5598853", "0.55495685", "0.5529852", "0.5524794", "0.55032283", "0.54667765", "0.5441677", "0.5422033", "0.5420616", "0.5385005", "0.5360498", "0.5346937", "0.5330463", "0.53201", "0.5319292", "0.5314921", "0.5301615", "0.5292476", "0.5282587", "0.52745557", "0.52617294", "0.525864", "0.5250199", "0.52472556", "0.5232349", "0.5231299", "0.52308834", "0.52308834", "0.52135074", "0.5185542", "0.51697636", "0.51694053", "0.5137911", "0.51305157", "0.5124433", "0.51192945", "0.5111655", "0.5109556", "0.5091584", "0.5087184", "0.50869197", "0.50809675", "0.5031319", "0.5004094", "0.49880064", "0.49880064", "0.49822456", "0.49717835", "0.4965238", "0.49495584", "0.49370068", "0.49356636", "0.49341017" ]
0.5578606
48
Fit an ellipse to an object and return the rotated rectangle in which the ellipse is inscribed.
def __CalculateEllipse(self, contour):
    if len(contour) > 5:
        return cv2.fitEllipse(contour)
    return cv2.minAreaRect(contour)
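A minimal usage sketch for the document above (an added illustration, not part of the dataset row; the sample contour, canvas size, and variable names are assumptions): both cv2.fitEllipse and cv2.minAreaRect return a rotated rectangle as ((cx, cy), (width, height), angle), so the two branches of __CalculateEllipse can be consumed uniformly, and the result can be drawn back with the rotated-rectangle overload of cv2.ellipse. Note that cv2.fitEllipse needs at least five points, which is why the method falls back to cv2.minAreaRect for small contours.

import cv2
import numpy as np

# Hypothetical six-point contour (len > 5, so the fitEllipse branch is taken).
contour = np.array([[10, 30], [18, 12], [35, 8], [52, 18], [55, 38], [32, 52]],
                   dtype=np.float32)

if len(contour) > 5:
    rect = cv2.fitEllipse(contour)   # ((cx, cy), (MA, ma), angle) of the fitted ellipse
else:
    rect = cv2.minAreaRect(contour)  # same tuple shape, from the minimum-area box

(cx, cy), (width, height), angle = rect

# Either result can be rendered as the ellipse inscribed in the rotated rectangle.
canvas = np.zeros((64, 64, 3), dtype=np.uint8)
cv2.ellipse(canvas, rect, (0, 0, 255), 1)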
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_ellipse(x,y):\r\n \r\n def fit(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))\r\n n = np.argmax(np.abs(E))\r\n a = V[:,n]\r\n return a\r\n \r\n def ellipse_center(a):\r\n b,c,d,f,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[0]\r\n num = b*b-a*c\r\n x0=(c*d-b*f)/num\r\n y0=(a*f-b*d)/num\r\n return np.array([x0,y0])\r\n \r\n def ellipse_angle_of_rotation(a):\r\n b,c,a = a[1]/2, a[2], a[0]\r\n return 0.5*np.arctan(2*b/(a-c))\r\n \r\n def ellipse_axis_length(a):\r\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\r\n up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\r\n down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n res1=np.sqrt(up/down1)\r\n res2=np.sqrt(up/down2)\r\n return np.array([res1, res2])\r\n \r\n a = fit(x,y)\r\n center = ellipse_center(a)\r\n theta = ellipse_angle_of_rotation(a)\r\n [R1,R2] = ellipse_axis_length(a)\r\n\r\n return R1, R2, center, theta", "def fitEllipse(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = eig(np.dot(inv(S), C))\r\n n = np.argmax(np.abs(E))\r\n a = V[:,n]\r\n return a", "def create_ellipse(self, ratio):\n circ = Point(self.center).buffer(1.0)\n ell = affinity.scale(circ, float(\n self.lengths[0]*ratio), float(self.lengths[1]*ratio))\n ellr = affinity.rotate(ell, self.angle)\n return ellr", "def ellipse(self):\n f = self.img\n x = self.x\n y = self.y\n x2 = self.x2\n y2 = self.y2\n xy = self.xy\n self.a2 = (x2+y2) + sqrt(((x2-y2)/2.)**2 + xy**2)\n self.b2 = (x2+y2) - sqrt(((x2-y2)/2.)**2 + xy**2)\n self.a = sqrt(self.a2)\n self.b = sqrt(self.b2)\n tan2theta = 2* (xy/(x2-y2))\n self.theta = arctan(tan2theta)/2.\n denominator = sqrt(((x2-y2)/2)**2+xy**2)\n self.cxx = y2/denominator\n self.cyy = x2/denominator\n self.cxy = -2*xy/denominator", "def to_ellipse(self, factor=1.0):\n self._check_initialized()\n vals, vecs = sp.linalg.eigh(self.covariance)\n order = vals.argsort()[::-1]\n vals, vecs = vals[order], vecs[:, order]\n angle = np.arctan2(*vecs[:, 0][::-1])\n width, height = factor * np.sqrt(vals)\n return angle, width, height", "def fitEllipsetoContour(poly):\n from skimage.measure import EllipseModel\n\n model = EllipseModel()\n success=model.estimate(poly)\n\n if success:\n yc, xc, a, b, theta=model.params # xc, yc, a, b, theta <- in radiand\n # convert theta to degree\n theta=np.degrees(theta)\n theta=90.-theta\n # make sure major is larger than minor and theta is for major\n if a<b:\n [a,b]=[b,a]\n theta=theta-90.\n if theta<0.: theta=theta+180.\n params=(xc, yc, a, b, theta)\n return params\n else:\n return (0, 0, 0, 0, 0)", "def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n 
self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])", "def drawFitEllipse(img, cnt):\n\td = fitEllipse(cnt)\n\tcv2.ellipse(img, d[\"center\"], (d[\"major\"], d[\"minor\"]), d[\"angle\"])", "def _proc_ellipse(self, tokens, filled):\n\n component = Ellipse(pen=self.pen,\n x_origin=tokens[\"x0\"],\n y_origin=tokens[\"y0\"],\n e_width=tokens[\"w\"],\n e_height=tokens[\"h\"],\n filled=filled)\n\n return component", "def ellipseDesc(lps):\r\n unit = 100 #units in QualiTree are in [mm], hence Pgl is in [dm] ?\r\n\r\n if isinstance(lps, pgl.Translated):\r\n cx, cy, cz = lps.translation\r\n else:\r\n print\"missing Translated from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n\r\n ori = lps.geometry\r\n\r\n if isinstance(ori, pgl.Oriented):\r\n rotMat = ori.transformation().getMatrix3()\r\n az, el, roll = rotMat.eulerAnglesZYX()\r\n else:\r\n print\"missing Oriented from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n az = 0\r\n \r\n scal = ori.geometry\r\n\r\n if isinstance(scal, pgl.Scaled):\r\n scMat = scal.transformation().getMatrix()\r\n rx, ry, rz, rt = scMat.getDiagonal()\r\n else:\r\n print\"missing Scaled from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n rx=ry=rz=1\r\n\r\n #x1, y1, z1 #Conversion repère MappleT (m) à reprère Qualitree (q) : Xq=Xm Yq=Zm Zq=-Ym. \r\n #Due to change of coordinate axis, rotation needs - pi <-- apparently not !\r\n #return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az-3.1415927\r\n\r\n return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az", "def _draw_ellipse(data, obj, draw_options):\n if isinstance(obj, mpl.patches.Circle):\n # circle specialization\n return _draw_circle(data, obj, draw_options)\n x, y = obj.center\n ff = data[\"float format\"]\n\n if obj.angle != 0:\n draw_options.append(\n f\"rotate around={{{obj.angle:{ff}}:(axis cs:{x:{ff}},{y:{ff}})}}\"\n )\n\n do = \",\".join(draw_options)\n content = (\n f\"\\\\draw[{do}] (axis cs:{x:{ff}},{y:{ff}}) ellipse \"\n f\"({0.5 * obj.width:{ff}} and {0.5 * obj.height:{ff}});\\n\"\n )\n content += _patch_legend(obj, draw_options, \"area legend\")\n\n return data, content", "def drawEllipse(img, center, axes, angle, startAngle=0, endAngle=360, color = (0,0,255), fill = -1):\n\tcv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, fill)", "def DrawEllipseRect(*args, **kwargs):\n return _gdi_.DC_DrawEllipseRect(*args, **kwargs)", "def ellipse_dist_ratio_poly(self, theta, lwr):\n\n \"\"\"\n\n Params for FWD fit\n array([ 9.99999989e-01, 8.10852195e+07, 1.95444928e+00, 7.96543026e-02])\n this one is un-needed, since it's approximation y = 1\n\n Params for FWD_DIAG fit\n array([-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n\n Params for ORTHOG fit\n array([-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n\n Params for BCK_DIAG fit\n array([-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n\n Params for BCK fit\n array([ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n \"\"\"\n\n #fitting function\n def f(x,params):\n return params[0] + (1.0 / (params[1]*(x+params[2])**params[3]))\n\n #force float math, in case theta is an integer\n theta = float(theta)\n\n #into an angle index form:\n t = abs(int(4.0*theta/np.pi))\n\n if (t == 0) or (t == 8):\n return 1.0\n elif (t == 1) or (t == 7):\n #forward 
diagonal\n return f(lwr, [-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n elif (t == 2) or (t == 6):\n #orthogonal\n return f(lwr, [-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n elif (t == 3) or (t == 5):\n #backward diagonal\n return f(lwr, [-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n elif t == 4:\n #backward\n return f(lwr, [ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n else:\n #hmmm... TODO\n return 0.0", "def plot_ellipse(center, covariance, alpha, color):\n # eigenvalues and eigenvector of matrix covariance\n eigenvalues, eigenvector = np.linalg.eigh(covariance)\n order = eigenvalues.argsort()[::-1]\n eigenvector = eigenvector[:, order]\n\n # Calculate Angle of ellipse\n angle = np.degrees(np.arctan2(*eigenvector[:, 0][::-1]))\n\n # Calculate with, height\n width, height = 4 * np.sqrt(eigenvalues[order])\n\n # Ellipse Object\n ellipse = Ellipse(xy=center, width=width, height=height, angle=angle,\n alpha=alpha, color=color)\n\n ax = plt.gca()\n ax.add_artist(ellipse)\n\n return ellipse", "def add_ellipse(self, x, y, w, h, fill_style=None, outline_style=None):\n if w < 1 or h < 1:\n return\n fill_style = self.__prepare_style(fill_style, self.style.char)\n outline_style = self.__prepare_style(outline_style, 'o')\n # Bresenham's algorithm to plot ellipse is used\n a = w\n b = h - 1\n eight_a_square = 8 * a * a\n eight_b_square = 8 * b * b\n x_change = 4 * b * b * (1.0 - a)\n y_change = 4 * a * a * ((b & 1) + 1)\n ellipse_error = x_change + y_change + (b & 1) * a * a\n x0 = x\n x1 = x0 + w - 1\n y0 = y + h / 2\n y1 = y0 - (b & 1)\n outline_points = []\n while x0 <= x1:\n # add fill\n if x0 > x and x0 < x + w - 1:\n self.add_line(int(x0), int(y0), int(x0), int(y1), fill_style)\n self.add_line(int(x1), int(y0), int(x1), int(y1), fill_style)\n outline_points.append((int(x1), int(y0)))\n outline_points.append((int(x0), int(y0)))\n outline_points.append((int(x0), int(y1)))\n outline_points.append((int(x1), int(y1)))\n two_ellipse_error = 2 * ellipse_error\n if two_ellipse_error <= y_change:\n y0 += 1\n y1 -= 1\n y_change += eight_a_square\n ellipse_error += y_change\n if two_ellipse_error >= x_change or 2 * ellipse_error > y_change:\n x0 += 1\n x1 -= 1\n x_change += eight_b_square\n ellipse_error += x_change\n while y0 - y1 <= b:\n self.add_point(int(x0 - 1), int(y0), outline_style)\n self.add_point(int(x1 + 1), int(y0), outline_style)\n self.add_point(int(x0 - 1), int(y1), outline_style)\n self.add_point(int(x1 + 1), int(y1), outline_style)\n y0 += 1\n y1 -= 1\n # draw outline over fill\n for outline_point in outline_points:\n px, py = outline_point\n self.add_point(px, py, outline_style)", "def _getEllipseSize(self, pointInEllipse):\n x = abs(self.center[0] - pointInEllipse[0])\n y = abs(self.center[1] - pointInEllipse[1])\n if x == 0 or y == 0:\n return x, y\n # Ellipse definitions\n # e: eccentricity\n # a: length fron center to bounding box width\n # b: length fron center to bounding box height\n # Equations\n # (1) b < a\n # (2) For x,y a point in the ellipse: x^2/a^2 + y^2/b^2 = 1\n # (3) b = a * sqrt(1-e^2)\n # (4) e = sqrt(a^2 - b^2) / a\n\n # The eccentricity of the ellipse defined by a,b=x,y is the same\n # as the one we are searching for.\n swap = x < y\n if swap:\n x, y = y, x\n e = math.sqrt(x**2 - y**2) / x\n # From (2) using (3) to replace b\n # a^2 = x^2 + y^2 / (1-e^2)\n a = math.sqrt(x**2 + y**2 / (1.0 - e**2))\n b = a * math.sqrt(1 - e**2)\n if swap:\n a, b = b, a\n return a, b", "def tilted_ellipse(s, pos1, pos2, size_x, size_y, color, angle):\n 
surface = pygame.Surface((150, 150), pygame.SRCALPHA, 32).convert_alpha()\n ellipse(surface, color, (0, 0, size_x, size_y))\n surface2 = pygame.transform.rotate(surface, angle)\n return s.blit(surface2, (pos1, pos2))", "def ellipse(x,y,a,b):\n return ((x/float(a))**2 + (y/float(b))**2)", "def draw_ellipse_outline(center_x, center_y, width, height, color,\n border_width=1, tilt_angle=0):\n\n num_segments = 128\n\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_LOOP)\n for segment in range(num_segments):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()", "def fit_ellipse(*args, equatorial_radius, dequatorial_radius=0, center_f=0, dcenter_f=0, center_g=0,\n dcenter_g=0, oblateness=0, doblateness=0, position_angle=0, dposition_angle=0,\n loop=10000000, number_chi=10000, dchi_min=None, verbose=False, ellipse_error=0, sigma_result=1):\n from sora.extra import ChiSquare\n from sora.config.visuals import progressbar_show\n from astropy.coordinates import Angle\n from .core import Occultation\n\n v = {'dcenter_f': dcenter_f, 'dcenter_g': dcenter_g, 'doblateness': doblateness, 'dposition_angle': dposition_angle,\n 'dequatorial_radius': dequatorial_radius, 'ellipse_error': ellipse_error, 'sigma_result': sigma_result,\n 'dchi_min': dchi_min}\n for key, item in v.items():\n if item is not None and item < 0:\n raise ValueError(\"{} must be a positive number.\".format(key))\n\n values = []\n chord_name = []\n if len(args) == 0:\n raise ValueError('No occultation have been given as input.')\n for occ in args:\n if not isinstance(occ, Occultation):\n raise TypeError('Given argument must be an Occultation object.')\n for name, chord in occ.chords.items():\n if chord.status() == 'positive':\n if chord.is_able['immersion']:\n f, g, vf, vg = chord.get_fg(time='immersion', vel=True)\n err = np.linalg.norm([vf, vg])*chord.lightcurve.immersion_err\n values.append([f, g, err])\n chord_name.append(name + '_immersion')\n if chord.is_able['emersion']:\n f, g, vf, vg = chord.get_fg(time='emersion', vel=True)\n err = np.linalg.norm([vf, vg])*chord.lightcurve.emersion_err\n values.append([f, g, err])\n chord_name.append(name + '_emersion')\n\n controle_f0 = Time.now()\n f0_chi = np.array([])\n g0_chi = np.array([])\n a_chi = np.array([])\n obla_chi = np.array([])\n posang_chi = np.array([])\n chi2_best = np.array([])\n\n while len(f0_chi) < number_chi:\n progressbar_show(len(f0_chi), number_chi, prefix='Ellipse fit:')\n chi2 = np.zeros(loop)\n f0 = center_f + dcenter_f*(2*np.random.random(loop) - 1)\n g0 = center_g + dcenter_g*(2*np.random.random(loop) - 1)\n a = equatorial_radius + dequatorial_radius*(2*np.random.random(loop) - 1)\n obla = oblateness + doblateness*(2*np.random.random(loop) - 1)\n obla[obla < 0], obla[obla > 1] = 0, 1\n phi_deg = position_angle + dposition_angle*(2*np.random.random(loop) - 1)\n controle_f1 = Time.now()\n\n for fi, gi, si in values:\n b = a - a*obla\n phi = 
phi_deg*(np.pi/180.0)\n dfi = fi-f0\n dgi = gi-g0\n theta = np.arctan2(dgi, dfi)\n ang = theta+phi\n r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)\n f_model = f0 + r_model*np.cos(theta)\n g_model = g0 + r_model*np.sin(theta)\n chi2 += ((fi - f_model)**2 + (gi - g_model)**2)/(si**2 + ellipse_error**2)\n\n controle_f2 = Time.now()\n if dchi_min is not None:\n region = np.where(chi2 < chi2.min() + dchi_min)[0]\n else:\n region = np.arange(len(chi2))\n chi2_best = np.append(chi2_best, chi2[region])\n if verbose:\n print('Elapsed time: {:.3f} seconds.'.format((controle_f2 - controle_f1).sec))\n print(len(chi2[region]), len(chi2_best))\n f0_chi = np.append(f0_chi, f0[region])\n g0_chi = np.append(g0_chi, g0[region])\n a_chi = np.append(a_chi, a[region])\n obla_chi = np.append(obla_chi, obla[region])\n posang_chi = np.append(posang_chi, phi_deg[region])\n\n progressbar_show(number_chi, number_chi, prefix='Ellipse fit:')\n chisquare = ChiSquare(chi2_best, len(values), center_f=f0_chi, center_g=g0_chi, equatorial_radius=a_chi,\n oblateness=obla_chi, position_angle=posang_chi)\n controle_f4 = Time.now()\n if verbose:\n print('Total elapsed time: {:.3f} seconds.'.format((controle_f4 - controle_f0).sec))\n\n result_sigma = chisquare.get_nsigma(sigma=sigma_result)\n a = result_sigma['equatorial_radius'][0]\n f0 = result_sigma['center_f'][0]\n g0 = result_sigma['center_g'][0]\n obla = result_sigma['oblateness'][0]\n phi_deg = result_sigma['position_angle'][0]\n radial_dispersion = np.array([])\n error_bar = np.array([])\n position_angle_point = np.array([])\n\n for fi, gi, si in values:\n b = a - a*obla\n phi = phi_deg*(np.pi/180.0)\n dfi = fi-f0\n dgi = gi-g0\n r = np.sqrt(dfi**2 + dgi**2)\n theta = np.arctan2(dgi, dfi)\n ang = theta+phi\n r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)\n radial_dispersion = np.append(radial_dispersion, r - r_model)\n error_bar = np.append(error_bar, si)\n position_angle_point = np.append(position_angle_point, Angle(90*u.deg - theta*u.rad).wrap_at(360 * u.deg).degree)\n\n for occ in args:\n if isinstance(occ, Occultation):\n occ.fitted_params = {i: result_sigma[i] for i in ['equatorial_radius', 'center_f', 'center_g',\n 'oblateness', 'position_angle']}\n occ.chi2_params = {'chord_name': chord_name, 'radial_dispersion': radial_dispersion,\n 'position_angle': position_angle_point, 'radial_error': error_bar,\n 'chi2_min': chisquare.get_nsigma(sigma=sigma_result)['chi2_min'],\n 'nparam': chisquare.nparam, 'npts': chisquare.npts}\n return chisquare", "def ellipse(self, arg, fill='', outline=''):\n pass", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... 
is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def get_ellipse_mask(ellipse, img_shape, offset=0):\n # create image\n mask = np.zeros((img_shape[0], img_shape[1]), dtype=np.uint8)\n\n try:\n # fill ellipse\n draw_ellipse(mask, ellipse, (255, 255, 255), -1)\n except Exception as ex:\n logging.getLogger(\"StrainDetection\").warning(\"Unable to create ellipse mask: {}\".format(ex))\n mask += 255 # make mask white to include everything\n return mask\n\n # dilate/erode by given offset, if necessary\n if offset != 0:\n operation = cv.MORPH_DILATE\n if offset < 0:\n operation = cv.MORPH_ERODE # if offset is negative --> erode\n\n # create kernel\n n = 2 * abs(offset) + 1\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (n, n))\n\n # perform morphological operation\n mask = cv.morphologyEx(mask, operation, kernel)\n\n return mask", "def ellipsoid(center, radii, rotation, scales=None, shape=None, minarea=0):\n center = np.array(center)\n radii = np.array(radii)\n rotation = np.array(rotation)\n assert center.shape == (3,)\n assert radii.shape == (3,)\n assert 0 < radii.max(), \"radii should contain at least one positive value\"\n assert rotation.shape == (3, 3)\n if scales is None:\n scales = (1.,) * 3\n scales = np.array(scales)\n assert scales.shape == (3,)\n\n scaled_center = center / scales\n\n # The upper_left_bottom and lower_right_top corners of the smallest cuboid\n # containing the ellipsoid.\n factor = np.array([\n [i, j, k] for k in (-1, 1) for j in (-1, 1) for i in (-1, 1)]).T\n while True:\n radii_rot = np.abs(\n np.diag(1. 
/ scales).dot(rotation.dot(np.diag(radii).dot(factor)))\n ).max(axis=1)\n # In the original scikit-image code, ceil and floor were replaced.\n # https://github.com/scikit-image/scikit-image/blob/master/skimage/draw/draw.py#L127\n upper_left_bottom = np.floor(scaled_center - radii_rot).astype(int)\n lower_right_top = np.ceil(scaled_center + radii_rot).astype(int)\n\n if shape is not None:\n # Constrain upper_left and lower_ight by shape boundary.\n upper_left_bottom = np.maximum(\n upper_left_bottom, np.array([0, 0, 0]))\n lower_right_top = np.minimum(\n lower_right_top, np.array(shape[:3]) - 1)\n\n bounding_shape = lower_right_top - upper_left_bottom + 1\n\n d_lim, r_lim, c_lim = np.ogrid[0:float(bounding_shape[0]),\n 0:float(bounding_shape[1]),\n 0:float(bounding_shape[2])]\n d_org, r_org, c_org = scaled_center - upper_left_bottom\n d_rad, r_rad, c_rad = radii\n rotation_inv = np.linalg.inv(rotation)\n conversion_matrix = rotation_inv.dot(np.diag(scales))\n d, r, c = (d_lim - d_org), (r_lim - r_org), (c_lim - c_org)\n distances = (\n ((d * conversion_matrix[0, 0] +\n r * conversion_matrix[0, 1] +\n c * conversion_matrix[0, 2]) / d_rad) ** 2 +\n ((d * conversion_matrix[1, 0] +\n r * conversion_matrix[1, 1] +\n c * conversion_matrix[1, 2]) / r_rad) ** 2 +\n ((d * conversion_matrix[2, 0] +\n r * conversion_matrix[2, 1] +\n c * conversion_matrix[2, 2]) / c_rad) ** 2\n )\n if distances.size < minarea:\n old_radii = radii.copy()\n radii *= 1.1\n print('Increase radii from ({}) to ({})'.format(old_radii, radii))\n else:\n break\n distance_thresh = 1\n while True:\n dd, rr, cc = np.nonzero(distances < distance_thresh)\n if len(dd) < minarea:\n distance_thresh *= 1.1\n else:\n break\n dd.flags.writeable = True\n rr.flags.writeable = True\n cc.flags.writeable = True\n dd += upper_left_bottom[0]\n rr += upper_left_bottom[1]\n cc += upper_left_bottom[2]\n return dd, rr, cc", "def ci95_ellipse(data, type=\"pop\"):\n\n # Build and fit PCA model\n pca = PCA()\n pca.fit(data)\n coeff = pca.components_\n score = pca.transform(data)\n eigvals = pca.explained_variance_\n\n # Calculate rotation angle\n phi = math.atan2(coeff[0, 1], coeff[0, 0])\n\n # This angle is between -pi and pi.\n # Let's shift it such that the angle is between 0 and 2pi\n if phi < 0:\n phi += 2 * math.pi\n\n # Get the coordinates of the data mean\n n = len(data)\n m = np.mean(data, axis=0)\n x0 = m[0]\n y0 = m[1]\n\n # Get the 95% confidence interval error ellipse\n # inverse of the chi-square cumulative distribution for p = 0.05 & 2 d.f. 
= 5.9915\n chisquare_val = 5.9915\n if type is \"pop\":\n a = math.sqrt(chisquare_val * eigvals[0])\n b = math.sqrt(chisquare_val * eigvals[1])\n elif type is \"mean\":\n a = math.sqrt(chisquare_val * eigvals[0] / n)\n b = math.sqrt(chisquare_val * eigvals[1] / n)\n else:\n raise ValueError(\"type has to be 'pop' or 'mean'.\")\n\n # the ellipse in x and y coordinates\n theta_grid = np.linspace(0, 2 * math.pi, num=100)\n ellipse_x_r = a * np.cos(theta_grid)\n ellipse_y_r = b * np.sin(theta_grid)\n\n # Define a rotation matrix\n R = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])\n # let's rotate the ellipse to some angle phi\n r_ellipse = np.dot(np.vstack((ellipse_x_r, ellipse_y_r)).T, R)\n\n # Draw the error ellipse\n x = r_ellipse[:, 0] + x0\n y = r_ellipse[:, 1] + y0\n ellipse = np.stack((x, y), axis=1)\n\n outside = []\n for i in range(len(score)):\n metric = (score[i, 0] / a) ** 2 + (score[i, 1] / b) ** 2\n if metric > 1:\n outside.append(1)\n else:\n outside.append(0)\n\n return ellipse, outside", "def ellipse(radii = (10,5), angle_resolution = 2.5, layer = 0):\n D = Device(name = 'ellipse')\n a = radii[0]\n b = radii[1]\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n r = a*b / (sqrt((b*cos(t))**2 + (a*sin(t))**2))\n xpts = r*cos(t)\n ypts = r*sin(t)\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def ellipse2pathd(ellipse):\n\n cx = ellipse.get('cx', 0)\n cy = ellipse.get('cy', 0)\n rx = ellipse.get('rx', None)\n ry = ellipse.get('ry', None)\n r = ellipse.get('r', None)\n\n if r is not None:\n rx = ry = float(r)\n else:\n rx = float(rx)\n ry = float(ry)\n\n cx = float(cx)\n cy = float(cy)\n\n d = ''\n d += 'M' + str(cx - rx) + ',' + str(cy)\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'\n\n return d", "def ellipse(self, x, y, radiusx, radiusy, rotation=0, startangle=0, endangle=2 * pi, anticlockwise=False):\n self._impl.ellipse(x, y, radiusx, radiusy, rotation, startangle, endangle, anticlockwise)", "def fitEllipse(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn {\"center\":(x,y), \"major\":MA, \"minor\":ma, \"angle\":angle}", "def ellipse2pathd(ellipse):\n\n cx = ellipse.get('cx', None)\n cy = ellipse.get('cy', None)\n rx = ellipse.get('rx', None)\n ry = ellipse.get('ry', None)\n r = ellipse.get('r', None)\n\n if r is not None:\n rx = ry = float(r)\n else:\n rx = float(rx)\n ry = float(ry)\n\n cx = float(cx)\n cy = float(cy)\n\n d = ''\n d += 'M' + str(cx - rx) + ',' + str(cy)\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'\n\n return d", "def ellipse2pathd(ellipse):\n\n cx = ellipse.get('cx', None)\n cy = ellipse.get('cy', None)\n rx = ellipse.get('rx', None)\n ry = ellipse.get('ry', None)\n r = ellipse.get('r', None)\n\n if r is not None:\n rx = ry = float(r)\n else:\n rx = float(rx)\n ry = float(ry)\n\n cx = float(cx)\n cy = float(cy)\n\n d = ''\n d += 'M' + str(cx - rx) + ',' + str(cy)\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'\n d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'\n\n return d", "def DrawEllipseRect(*args, **kwargs):\n return _gdi_.PseudoDC_DrawEllipseRect(*args, **kwargs)", "def draw_ellipse(self, color, position, size=None,\n border_width=0, anchor='topleft'):\n if size is None:\n rect = spyral.Rect(position)\n else:\n rect = spyral.Rect(position, 
size)\n offset = self._calculate_offset(anchor, rect.size)\n pygame.draw.ellipse(self._surf, color,\n (rect.pos + offset, rect.size), border_width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def r_ellipse(self,xc=None,yc=None):\n x = self.x\n y = self.y\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.rel = sqrt(self.cxx*(x-xc)**2 +\n\t\t self.cyy*(y-yc)**2 +\n\t\t self.cxy*(x-xc)*(y-yc)\n\t\t )", "def draw_ellipse_filled(center_x, center_y,\n width, height, color, tilt_angle=0):\n\n num_segments = 128\n\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_TRIANGLE_FAN)\n\n GL.glVertex3f(0, 0, 0.5)\n\n for segment in range(num_segments + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()", "def cartesian_to_ellipse(center, angle, lengths):\n xInd, yInd = np.mgrid[:512, :512]\n major = max(lengths)/np.mean(lengths)\n minor = min(lengths)/np.mean(lengths)\n xInd, yInd = xInd - center[0], yInd - center[1]\n xInd, yInd = rotate(xInd, yInd, angle=-angle)\n xInd, yInd = xInd*minor, yInd*major\n xInd, yInd = rotate(xInd, yInd, angle=angle)\n return xInd, yInd", "def add_ellipse(self, centroid, length, width, angle, asymmetry=0.0,\n **kwargs):\n ellipse = Ellipse(xy=centroid, width=length, height=width,\n angle=np.degrees(angle), fill=False, **kwargs)\n self.axes.add_patch(ellipse)\n self.update()\n return ellipse", "def render_ellipse_filled(shape, center_x, center_y, color, angle=0):\n # Set color\n if len(color) == 4:\n GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2],\n shape.color[3])\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n elif len(color) == 3:\n GL.glDisable(GL.GL_BLEND)\n GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2], 255)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shape.vbo_id)\n GL.glVertexPointer(2, GL.GL_FLOAT, 0, 0)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n if angle:\n GL.glRotatef(angle, 0, 0, 1)\n\n GL.glDrawArrays(GL.GL_TRIANGLE_FAN, 0, shape.size)", "def draw_ellipse(mat, radius_x, radius_y, angle, color=(0, 0, 255), thickness=1):\n cv2.ellipse(mat, (radius_x, radius_y), angle, 0, 0, color, thickness=thickness)", "def box_ellipse(A, r):\n A = np.array(A)\n A = A.transpose().dot(A)\n size = len(A)\n widths = []\n for i in range(size):\n setperp = [k for k in range(i)] + [k for k in range(i + 1, size)]\n v = A[i, setperp]\n A22 = A[setperp][:, setperp]\n try:\n Aperpinv = np.linalg.inv(A22)\n gamma = Aperpinv.dot(v)\n gamma = gamma.dot(v)\n widths.append(r / np.sqrt(A[i, i] - gamma))\n except np.linalg.linalg.LinAlgError:\n widths.append(1.0e300)\n return widths", "def draw_ellipse(image, bounds, width=1, outline: Color = 'white', antialias=4):\n\n # Use a single 
channel image (mode='L') as mask.\n # The size of the mask can be increased relative to the imput image\n # to get smoother looking results.\n mask = Image.new(size=[int(dim * antialias) for dim in image.size], mode='L', color='black')\n draw = ImageDraw.Draw(mask)\n\n # draw outer shape in white (color) and inner shape in black (transparent)\n for offset, fill in (-7, 'white'), (width, 'black'):\n left, top = [(value + offset) * antialias for value in bounds[:2]]\n right, bottom = [(value - offset) * antialias for value in bounds[2:]]\n draw.ellipse([left, top, right, bottom], fill=fill)\n\n # downsample the mask using PIL.Image.LANCZOS\n # (a high-quality downsampling filter).\n mask = mask.resize(image.size, Image.LANCZOS)\n # paste outline color to input image through the mask\n image.paste(outline, mask=mask)", "def ellipse_pt(th, x_c, y_c, a, b, rot):\n x = x_c + (a * cos(th) * cos(rot) - b * sin(th) * sin(rot))\n y = y_c + (a * cos(th) * sin(rot) - b * sin(th) * cos(rot))\n return x, y", "def r_ellipse(radius=5, xc=[0., 0.], q=0.5, pa=0, re=1., gf_header=None, comp='2', verbose=True, nstep=256, psf_offset=[1, 1], **kwargs):\n if gf_header is not None:\n xx = gf_header_key(gf_header, comp+'_XC')\n yy = gf_header_key(gf_header, comp+'_YC')\n xc = np.array([xx, yy])\n mag = gf_header_key(gf_header, comp+'_MAG')\n\n if comp+'_N' in gf_header:\n n = gf_header_key(gf_header, comp+'_N')\n q = gf_header_key(gf_header, comp+'_AR')\n pa = gf_header_key(gf_header, comp+'_PA')\n re = gf_header_key(gf_header, comp+'_RE')\n else:\n n = 1.\n q = 1.\n pa = 0.\n re = 0.01\n\n if verbose:\n print(f'xc:{xc}, q:{q}, pa:{pa}')\n\n phi = np.linspace(0, 2*np.pi, nstep)\n xp = np.array([np.cos(phi), q*np.sin(phi)]).T*radius\n\n theta = -(np.pi/2 + pa/180*np.pi) # + np.pi\n\n _rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n\n x0 = xp.dot(_rot) + np.atleast_1d(xc)\n xell, yell = (x0 - 1 - np.atleast_1d(psf_offset)).T\n\n return xell, yell", "def drawEllipse(self,id,x,y,rx,ry):\n if id in self.elements.keys():\n print(\"The id for the ellipse has been registered! 
Please use another one\")\n return\n try:\n self.checkInBound(x,0); self.checkInBound(x+rx,0); self.checkInBound(x-rx,0)\n except AssertionError as e:\n print(\"The ellipse set would be longer than the x axis of canvas!\")\n return\n try:\n self.checkInBound(y,0); self.checkInBound(y+ry,0); self.checkInBound(y-ry,0)\n except AssertionError as e:\n print(\"The ellipse set would be longer than the y axis of canvas!\")\n return\n ellipseEle=Ellipse(id,self.h-1-y,x,ry,rx,self.drawColor)\n self.elements[id]=ellipseEle\n self.sync=False", "def fill_ellipse(self, x0, y0, a, b, color):\n a2 = a * a\n b2 = b * b\n twoa2 = a2 + a2\n twob2 = b2 + b2\n x = 0\n y = b\n px = 0\n py = twoa2 * y\n # Plot initial points\n self.line(x0, y0 - y, x0, y0 + y, color)\n # Region 1\n p = round(b2 - (a2 * b) + (0.25 * a2))\n while px < py:\n x += 1\n px += twob2\n if p < 0:\n p += b2 + px\n else:\n y -= 1\n py -= twoa2\n p += b2 + px - py\n self.line(x0 + x, y0 - y, x0 + x, y0 + y, color)\n self.line(x0 - x, y0 - y, x0 - x, y0 + y, color)\n # Region 2\n p = round(b2 * (x + 0.5) * (x + 0.5) +\n a2 * (y - 1) * (y - 1) - a2 * b2)\n while y > 0:\n y -= 1\n py -= twoa2\n if p > 0:\n p += a2 - py\n else:\n x += 1\n px += twob2\n p += a2 - py + px\n self.line(x0 + x, y0 - y, x0 + x, y0 + y, color)\n self.line(x0 - x, y0 - y, x0 - x, y0 + y, color)", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def scatter_ellipse(axis_ob, x, y, w, h, c='b', a=0.0, alpha=0.5):\r\n if not axis_ob._hold:\r\n axis_ob.cla()\r\n\r\n if not iterable(a):\r\n a = [a] * len(x)\r\n\r\n if not iterable(alpha):\r\n alpha = [alpha] * len(x)\r\n if len(c) != len(x):\r\n raise ValueError('c and x are not equal lengths')\r\n if len(w) != len(x):\r\n raise ValueError('w and x are not equal lengths')\r\n\r\n if len(h) != len(x):\r\n raise ValueError('h and x are not equal lengths')\r\n if len(a) != len(x):\r\n raise ValueError('a and x are not equal lengths')\r\n # if len(alpha)!=len(x):\r\n # raise ValueError, 'alpha and x are not equal lengths'\r\n patches = []\r\n for thisX, thisY, thisW, thisH, thisC, thisA, thisAl in \\\r\n zip(x, y, w, h, c, a, alpha):\r\n ellip = Ellipse((thisX, thisY), width=thisW, height=thisH,\r\n angle=thisA)\r\n\r\n ellip.set_facecolor(thisC)\r\n ellip.set_alpha(thisAl)\r\n axis_ob.add_patch(ellip)\r\n patches.append(ellip)\r\n axis_ob.autoscale_view()\r\n return axis_ob", "def ellipseToPath(self,node):\n cx = float(node['cx'])\n cy = float(node['cy'])\n rx = 0\n ry = 0\n if 'rx' in node:\n rx = float(node['rx'])\n if 'ry' in node:\n ry = float(node['ry'])\n\n d ='M %f,%f '%(cx-rx,cy)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx,cy-ry)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx+rx,cy)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx,cy+ry)\n d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx-rx,cy)\n\n return d", "def smallest_ellipse(points, tol = 0.001):\n points = np.asmatrix(points)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n \n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d\n return np.asarray(A), np.squeeze(np.asarray(c))", "def create_ellipse(home_location, work_location, spread):\n\tif work_location is None:\n\t\treturn None\n\telse:\n\t\ta = home_location.distance(work_location)\n\t\tb = a * 
float(spread)\n\t\tpoint_list = []\n\t\tazimuth = math.atan2(work_location.y - home_location.y, work_location.x - home_location.x)\n\t\tro = (math.pi / 200)\n\n\t\tfor t in range(0, 401):\n\t\t\tx = home_location.x + (a * math.cos(t * ro) * math.cos(azimuth) - b * math.sin(t * ro) * math.sin(azimuth))\n\t\t\ty = home_location.y + (b * math.sin(t * ro) * math.cos(azimuth) + a * math.cos(t * ro) * math.sin(azimuth))\n\t\t\tpoint_list.append([Point(x, y).x, Point(x, y).y])\n\t\treturn Polygon(point_list)", "def create_ellipse(width, height, color):\n num_segments = 64\n\n data = []\n\n for segment in range(num_segments + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n data.extend([x, y])\n\n vbo_id = GL.GLuint()\n\n GL.glGenBuffers(1, ctypes.pointer(vbo_id))\n\n v2f = data\n data2 = (GL.GLfloat * len(v2f))(*v2f)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)\n GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,\n GL.GL_STATIC_DRAW)\n\n shape = VertexBuffer(vbo_id, len(v2f) // 2, width, height, color)\n return shape", "def getEllipticalKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)", "def draw_probe_ellipse(xy, covar, alpha, color=None, **kwargs):\n \n b24ac = scipy.sqrt(pow(covar[0,0] - covar[1,1],2) + 4*pow(covar[0,1],2))\n c2inv = chi2.ppf(alpha, 2.)\n \n a = scipy.real(scipy.sqrt(c2inv*.5*(covar[0,0] + covar[1,1] + b24ac)))\n b = scipy.real(scipy.sqrt(c2inv*.5*(covar[0,0] + covar[1,1] - b24ac)))\n\n if covar[0,0] != covar[1,1]:\n theta = .5*scipy.arctan(2*covar[0,1]/(covar[0,0] - covar[1,1]))\n print(theta)\n else:\n theta = scipy.sign(covar[0,1])*scipy.pi/4\n \n if covar[1,1] > covar[0,0]:\n swap = a\n a = b\n b = swap\n\n ellipse = Ellipse(xy, 2*a, 2*b, angle=theta*180./scipy.pi, edgecolor=color, fill=False, **kwargs)\n plt.gca().add_patch(ellipse)\n return ellipse", "def add_ellipse(self, centroid, length, width, angle, asymmetry=0.0, **kwargs):\n ellipse = Ellipse(\n x=centroid[0],\n y=centroid[1],\n width=length,\n height=width,\n angle=angle,\n fill_color=None,\n **kwargs,\n )\n glyph = self.figure.add_glyph(ellipse)\n self._annotations.append(glyph)\n self.update()\n return ellipse", "def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)", "def proc_filled_ellipse(self, tokens):\n\n return self._proc_ellipse(tokens, filled=True)", "def plot_cov_ellipse(ellipses, cov, pos=[0.0, 0.0], nstds=[0.0,1.0,2.0], **kwargs):\n def eigsorted(cov):\n vals, vecs = _np.linalg.eigh(cov)\n order = vals.argsort()[::-1]\n return vals[order], vecs[:,order]\n\n\n vals, vecs = eigsorted(cov)\n theta = _np.degrees(_np.arctan2(*vecs[:,0][::-1]))\n\n # Width and height are \"full\" widths, not radius\n sigma_max = 0.5\n alpha = min(0.8, _np.prod(sigma_max /_np.sqrt(vals)))\n for i,e in enumerate(ellipses):\n sigma = nstds[i]\n width, height = 2 * sigma * _np.sqrt(vals)\n #ellipses[i].center = pos\n e.set_alpha(alpha)\n if sigma > 0.1: #if this is below, then treat ellipse as a center circle and do not modify size at all\n e.width = width\n e.height= height\n e.angle = theta\n e.center = pos\n e.set(**kwargs)\n\n# e.fill=True\n# e.set_linewidth(0.0)\n\n\n return ellipses", "def draw_ellipse(self, color, 
position, size, border_width = 0, anchor= 'topleft'):\n # We'll try to make sure that everything is okay later\n \n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor, size)\n pygame.draw.ellipse(self._surf, color, (position + offset, size), border_width)", "def geom_ellipse(mean: np.array, cov: np.array, data: np.array=None, q: float=0.95, **kwargs):\n\n # Radius that covers q-fraction of white gaussian noise \n r = np.sqrt(stats.chi2.ppf(q=q, df=2))\n \n # Eigen-directions of a covariance matrix\n try:\n L, W = np.linalg.eigh(cov)\n except:\n return geom_path(aes(x = 'x', y = 'y'), data = DataFrame(columns=['x', 'y']))\n \n # Properly scaled eigen-directions\n W[0, :] = W[0, :] * r * np.sqrt(L[0]) \n W[1, :] = W[1, :] * r * np.sqrt(L[1]) \n \n theta = np.linspace(0, 2 * np.pi, 100)\n \n return geom_path(aes(x = 'x', y = 'y'), data = DataFrame()\n .assign(x = mean[0] + np.sin(theta) * W[0, 0] + np.cos(theta) * W[1, 0])\n .assign(y = mean[1] + np.sin(theta) * W[0, 1] + np.cos(theta) * W[1, 1]), **kwargs)", "def extractOblateEllipse(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for oblate ellipse\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1. - aniso**2. )\n\n\n # the oblate surface area\n surface = 2. * np.pi * kperp**2. * ( 1. + ( (1. - e**2.) / e ) * np.arctanh(e) )\n\n return surface", "def getEllipse(self, xc, Sigma, nSigma=2):\n\n if nla.det(Sigma) == 0:\n return None\n\n w, v = nla.eig(Sigma)\n D = np.diag(w, 0)\n\n theta = np.linspace(0, 2*np.pi, 100, endpoint=True)\n circle = nSigma*np.vstack((np.cos(theta), np.sin(theta)))\n\n el = sla.sqrtm(D)\n el = el.dot(circle)\n el = v.dot(el)\n\n XY = xc + el\n\n return XY", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def ellipse_bounds(P, level, n=100):\n # Round up to multiple of 2\n n += n % 2\n\n # Principal axes of ellipsoid\n eigval, eigvec = np.linalg.eig(P)\n eigvec *= np.sqrt(level / eigval)\n\n # set zero angle at maximum x\n angle = np.linspace(0, 2 * np.pi, n)[:, None]\n angle += np.arctan(eigvec[0, 1] / eigvec[0, 0])\n\n # Compute positions\n pos = np.cos(angle) * eigvec[:, 0] + np.sin(angle) * eigvec[:, 1]\n n /= 2\n\n # Return x-position (symmetric) and upper/lower bounds\n return pos[:n, 0], pos[:n, 1], pos[:n - 1:-1, 1]", "def test_ellipse_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n color=(0, 0, 1, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n color=(0, 0, 1, 1),\n border_color=(1, 0, 0, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n border_color=(1, 0, 0, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse3.png')", "def draw_oval(display, coord, box_size, color, bg_color):\n left, top = coord\n half = int(box_size * 0.5)\n quarter = int(box_size * 0.25)\n pygame.draw.ellipse(display, color, (left, top + quarter, box_size, half))", "def fitEllipseDirect(points):\r\n x = points[:, 0]; y = points[:, 1];\r\n # Build design matrix\r\n D = np.vstack((x*x, x*y, y*y, x, y, np.ones(x.shape)))\r\n # Build scatter matrix\r\n S = D.dot(D.T)\r\n # Build constraint matrix\r\n C = np.zeros((6, 6))\r\n C[0, 2]= +2; C[1, 1]= -1; C[2, 0]= +2;\r\n # Solve generalised eigenvalue system C*a == l*S*a\r\n geval, gevec = linalg.eig(S, C)\r\n # Find the eigenvector with the only pozitive eigenvalue\r\n geval = np.real(geval)\r\n i = np.argmax((geval>0) * np.isfinite(geval))\r\n if not np.isfinite(geval[i]):\r\n raise linalg.LinAlgError(\r\n \"Eigenvalue calculation failed to return a valid answer.\" +\r\n \"\\nEigenvalues:\\n\" + str(geval) + '\\n')\r\n theVec = np.real(gevec[:, i])\r\n # That vector has the parameters of the ellipse\r\n return tuple(theVec.flatten())", "def draw_ellipse(position, covariance, ax=None, **kwargs):\r\n # Convert covariance to principal axes\r\n if covariance.shape == (2, 2):\r\n U, s, Vt = np.linalg.svd(covariance)\r\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\r\n width, height = 2 * np.sqrt(s)\r\n else:\r\n angle = 0\r\n width, height = 2 * np.sqrt(covariance)\r\n \r\n # Draw the Ellipse\r\n for nsig in range(1, 4):\r\n ax.add_patch(Ellipse(position, nsig * width, nsig * height, \r\n angle, **kwargs))", "def run_ellipse(img, redo=False):\n # Reading data and mask\n outfile = \"ellipse.txt\"\n if os.path.exists(outfile) and not redo:\n return\n data = make_masked_img(img)\n # Preparing ellipse fitting\n\n geometry = EllipseGeometry(x0=213, y0=235, sma=25, eps=0.3,\n pa=np.deg2rad(-50))\n ellipse = Ellipse(data, geometry)\n isolist = ellipse.fit_image(fflag=0.01, maxsma=200, maxrit=104)\n # isolist = ellipse.fit_image(fflag=0.01, maxsma=20)\n table = isolist.to_table()[1:]\n table.write(outfile, format=\"ascii\", overwrite=True)\n # Producing 
image\n model_image = build_ellipse_model(data.shape, isolist)\n residual = data - model_image\n fig, (ax1, ax2, ax3) = plt.subplots(figsize=(14, 5), nrows=1, ncols=3)\n fig.subplots_adjust(left=0.04, right=0.98, bottom=0.02, top=0.98)\n ax1.imshow(data, origin='lower')\n ax1.set_title('Data')\n\n smas = np.linspace(5, 200, 10)\n for sma in smas:\n iso = isolist.get_closest(sma)\n x, y, = iso.sampled_coordinates()\n ax1.plot(x, y, color='C1')\n ax2.imshow(model_image, origin='lower')\n ax2.set_title('Ellipse Model')\n ax3.imshow(residual, origin='lower')\n ax3.set_title('Residual')\n plt.savefig(\"ellipse.png\", dpi=250)\n plt.show()", "def draw_ellipse(position, covariance, ax=None, **kwargs):\n ax = ax or plt.gca()\n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def draw_ellipse(position, covariance, ax=None, **kwargs):\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def proc_unfilled_ellipse(self, tokens):\n\n return self._proc_ellipse(tokens, filled=False)", "def transform(self, heraldry: Image.Image) -> Image.Image:\n heraldry = heraldry.convert('RGBA')\n orig_width, orig_height = heraldry.size\n\n new_height = int(self.height_change * orig_height)\n ell_img = Image.new('RGBA', (orig_width, new_height))\n draw = ImageDraw.Draw(ell_img)\n\n top_left = (0, int(self.ell_start * orig_height))\n bot_right = (orig_width - 1, new_height - 1)\n draw.ellipse((*top_left, *bot_right), fill = self.fill_col)\n ell_img.paste(heraldry, (0, 0), heraldry)\n\n return ell_img", "def __DrawEllipse(self, image, rectangule, color):\n cv2.ellipse(image, rectangule, color, 2)\n points = cv2.boxPoints(rectangule)\n for i in range(4):\n cv2.line(image, tuple(np.array(points[i], np.int32)),\n tuple(np.array(points[(i + 1) % 4], np.int32)), color, 2)", "def area_ellipse(radius_x: float, radius_y: float) -> float:\r\n if radius_x < 0 or radius_y < 0:\r\n raise ValueError(\"area_ellipse() only accepts non-negative values\")\r\n return pi * radius_x * radius_y", "def getMinVolEllipse(P, tolerance=0.01):\n (N, d) = np.shape(P)\n d = float(d)\n\n # Q will be our working array\n Q = np.vstack([np.copy(P.T), np.ones(N)]) \n QT = Q.T\n \n # initializations\n err = 1.0 + tolerance\n u = (1.0 / N) * np.ones(N)\n\n # Khachiyan Algorithm\n while err > tolerance:\n V = np.dot(Q, np.dot(np.diag(u), QT))\n M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal vector of an NxN matrix\n j = np.argmax(M)\n maximum = M[j]\n step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))\n new_u = (1.0 - step_size) * u\n new_u[j] += step_size\n err = np.linalg.norm(new_u - u)\n u = new_u\n\n # center of the ellipse \n center = np.dot(P.T, u)\n\n # the A matrix for the ellipse\n A = linalg.inv(\n np.dot(P.T, np.dot(np.diag(u), P)) - \n 
np.array([[a * b for b in center] for a in center])\n ) / d\n \n # Get the values we'd like to return\n U, s, rotation = linalg.svd(A)\n radii = 1.0/np.sqrt(s)\n\n rot_err = linalg.norm(np.identity(3)-abs(rotation))\n if(rot_err > 0.05):\n \tradii = np.array([radii[1],radii[0],radii[2]])\n return radii", "def draw_ellipse(position, covariance, ax=None, **kwargs):\n ax = ax or plt.gca()\n\n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n\n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def draw_ellipse(frame, coordinate, line_color=(124, 0, 0), radius=1, normalized=True):\n\n x1 = coordinate[0]\n y1 = coordinate[1]\n\n if normalized:\n h = frame.shape[0]\n w = frame.shape[1]\n\n x1 = int(x1 * w)\n y1 = int(y1 * h)\n\n cv.circle(frame, (x1, y1), radius=radius, color=line_color, thickness=1)", "def draw_ellipse(position, covariance, ax=None, num_contours=5, **kwargs):\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n \n # Draw the Ellipse\n for nsig in range(1, num_contours):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))", "def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):\n pts = np.zeros((360*k+1, 2))\n\n beta = -angle * np.pi/180.0\n sin_beta = np.sin(beta)\n cos_beta = np.cos(beta)\n alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])\n \n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n \n pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)\n pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)\n\n return pts", "def ellipse_to_rectangle(n, locations):\n major_axis_radius, minor_axis_radius, angle, center_x, center_y = locations\n pt = []\n for i in range(n):\n pt1 = (int(center_x[i]) - int(minor_axis_radius[i]), int(center_y[i]) - int(major_axis_radius[i]))\n pt2 = (int(center_x[i]) + int(minor_axis_radius[i]), int(center_y[i]) + int(major_axis_radius[i]))\n pt.append([pt1, pt2])\n return pt", "def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):\n pts = np.zeros((360*k+1, 2))\n\n beta = -angle * np.pi/180.0\n sin_beta = np.sin(beta)\n cos_beta = np.cos(beta)\n alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])\n\n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n\n pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)\n pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)\n\n return pts", "def calcualte_ellipse_radii(guess, eccentricity = 0, perimeter = 2 * np.pi*1):\n return fsolve(ellipse_radii_test, guess, args = (eccentricity, perimeter))", "def DrawEllipse(*args, **kwargs):\n return _gdi_.DC_DrawEllipse(*args, **kwargs)", "def get_cov_ellipse(cov, centre, nstd, **kwargs):\n #WZN\n\n # Find and sort eigenvalues and eigenvectors into descending order\n eigvals, eigvecs = np.linalg.eigh(cov)\n order = eigvals.argsort()[::-1]\n eigvals, eigvecs = eigvals[order], eigvecs[:, order]\n\n # The anti-clockwise angle to rotate our ellipse by \n vx, vy = eigvecs[:,0][0], eigvecs[:,0][1]\n theta = np.arctan2(vy, vx)\n\n # Width and height of ellipse to draw\n width, height = 2 * nstd * np.sqrt(eigvals)\n return 
Ellipse(xy=centre, width=width, height=height,\n angle=np.degrees(theta), **kwargs)", "def DrawEllipsePointSize(*args, **kwargs):\n return _gdi_.DC_DrawEllipsePointSize(*args, **kwargs)", "def visualize_result(img, ellipse, radii=None, angles=None, reference_ellipse=None, strain=None, marker_size=8,\n title=\"\"):\n if img is None or ellipse is None:\n return img\n\n img = np.copy(img) # copy so we don't alter the original\n\n blue = (255, 0, 0)\n green = (0, 200, 0)\n red = (0, 0, 255)\n black = (0, 0, 0)\n strain_colors = ((0, 127, 255), (127, 0, 255))\n\n # draw the reference ellipse\n if reference_ellipse is not None:\n draw_ellipse(img, reference_ellipse, green, 1, True, 1)\n\n # draw the fitted ellipse\n draw_ellipse(img, ellipse, blue, 2, True, 1)\n\n # draw radial strain\n if radii is not None and angles is not None:\n assert len(radii) == len(angles)\n # duplicate colors to make sure we have enough for each radius\n strain_colors = strain_colors * int(np.ceil(len(radii) / len(strain_colors)))\n center, diam, angle = ellipse\n for r, a, c in zip(radii, angles, strain_colors):\n p1, p2 = draw_diameter(img, center, r, a, c, 1)\n draw_cross(img, p1, marker_size, a, c, 2)\n draw_cross(img, p2, marker_size, a, c, 2)\n\n # draw text\n font = cv.FONT_HERSHEY_SIMPLEX\n scale = 1.5\n margin = 20\n cv.putText(img, title, (margin, 60), font, scale, black, 3)\n if reference_ellipse is not None:\n cv.putText(img, \"Detected ellipse\", (margin, 120), font, scale, blue, 2)\n cv.putText(img, \"Reference ellipse\", (margin, 180), font, scale, green, 2)\n\n if strain is not None:\n cv.putText(img, \"Strain: {:.1f} %\".format(strain), (margin, 240), font, scale, black, 2)\n\n return img", "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = 
np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "def minimum_rotated_rectangle(self): # -> BaseGeometry:\n ...", "def DrawEllipse(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawEllipse(*args, **kwargs)", "def generate_ellipse(R1,R2,center,theta,N=100):\r\n t = np.linspace(0.0,2.0*np.pi,N)\r\n x = R1*np.cos(t)*np.cos(theta) - R2*np.sin(t)*np.sin(theta) + center[0]\r\n y = R1*np.cos(t)*np.sin(theta) + R2*np.sin(t)*np.cos(theta) + center[1]\r\n return x,y", "def constructByParamEllipse(x_center, y_center, maj, min, rot, radius_3d_circle, focal_length=1):\n e = ImpEllipse.construct_by_param(x_center, y_center, maj, min, rot)\n\n return Double3DCircle.construct_by_ImpEllipse(e, radius_3d_circle, focal_length)", "def determine_bounding_box_of_rotated_box(self, box, rotation_matrix):\n\n # top left, top right, bottom left, bottom right\n p1, p2, p3, p4 = box_points(box)\n\n # rotate all the points of the box\n tp1 = calc_rotate_point_with_rotation_matrix(p1, rotation_matrix)\n tp2 = calc_rotate_point_with_rotation_matrix(p2, rotation_matrix)\n tp3 = calc_rotate_point_with_rotation_matrix(p3, rotation_matrix)\n tp4 = calc_rotate_point_with_rotation_matrix(p4, rotation_matrix)\n\n # figure out which point has the furthest x distance, and the furthest y distance\n dx1 = abs(tp1[0] - tp4[0])\n dx2 = abs(tp2[0] - tp3[0])\n dy1 = abs(tp1[1] - tp4[1])\n dy2 = abs(tp2[1] - tp3[1])\n # the width and the height is the max distance between x and y\n w, h = max(dx1, dx2), max(dy1, dy2)\n\n # x and y is the min x, and min y among all points\n x = min(tp1[0], tp2[0], tp3[0], tp4[0])\n y = min(tp1[1], tp2[1], tp3[1], tp4[1])\n\n return (x, y, w, h)", "def get_exterior(self, x, y, x1, x2, bottom, head_y):\n fx1 = x+(x-x1)*8\n fx2 = x+(x-x2)*8\n # compute bounding ellipse; and intersection with body outline\n cv2.ellipse(self.ellipse_finder, ((x/mscale,y/mscale), ((fx1-fx2)/mscale, (2*(bottom-head_y))/mscale), 0), 255,-1 )\n intersection = np.bitwise_and(255-self.ellipse_finder, self.median_finder)\n # find external blobs\n im2, out_contours, out_hierarchy = cv2.findContours(intersection,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n return out_contours, out_hierarchy, fx1-fx2", "def _gen_ellipse(twiss, ep=1, num=100):\n a, b, c = twiss\n\n t = np.linspace(0, 2 * np.pi, num)\n t0 = np.arctan(a)\n x = np.sqrt(b * ep) * np.cos(t)\n y = np.sqrt(c * 
ep) * np.sin(t - t0)\n\n return np.vstack([x, y])", "def eclipse_compensate(aLocation):\n EQUATOR_RADIUS = 6378137.0 # equator radius, or \"spherical\" earth\n POLAR_RADIUS = 6356725.0 # ploar radius\n EP_DIFF = EQUATOR_RADIUS - POLAR_RADIUS # rad-diff between equator and pole\n \n # assuming linear slope from equator to pole\n r_center = POLAR_RADIUS + EP_DIFF * (1.0 - abs(aLocation.lat)/90) # the ring thru earth center\n r_level = r_center * math.cos(math.radians(aLocation.lat)) # the ring thru latitude level\n \n return [r_center, r_level]" ]
[ "0.72764295", "0.65389484", "0.6258115", "0.6085874", "0.6078846", "0.59491014", "0.5820374", "0.5784566", "0.561725", "0.55927783", "0.5559429", "0.5536257", "0.5470644", "0.5456046", "0.54508764", "0.54230434", "0.5402299", "0.53830856", "0.5359062", "0.53571004", "0.53560513", "0.53382266", "0.5328916", "0.5314603", "0.5278285", "0.526421", "0.5252613", "0.5245298", "0.5244697", "0.5243301", "0.5242532", "0.5242532", "0.5236282", "0.5232601", "0.52291036", "0.51700604", "0.51643294", "0.5152434", "0.5149357", "0.5147671", "0.5136441", "0.5118137", "0.5099325", "0.5098919", "0.50835234", "0.50798863", "0.5063783", "0.50600314", "0.50450844", "0.5044488", "0.502462", "0.50230616", "0.5020567", "0.50108546", "0.5007209", "0.5006757", "0.4990369", "0.4988743", "0.49806115", "0.49620646", "0.4955084", "0.49350673", "0.49310857", "0.49072263", "0.49036583", "0.49018162", "0.48816252", "0.48798743", "0.4878113", "0.486852", "0.48643646", "0.4854467", "0.48396644", "0.48237053", "0.48216644", "0.48196954", "0.48155782", "0.47958115", "0.478994", "0.477515", "0.4756133", "0.4749394", "0.47141978", "0.47078654", "0.47029993", "0.47008288", "0.47006717", "0.4695882", "0.46900976", "0.4689779", "0.4689779", "0.4689779", "0.46845865", "0.46709248", "0.4670893", "0.46501812", "0.4625324", "0.46126327", "0.46106678", "0.46102318" ]
0.5605659
9
Calculate the contour extent.
def __CalculateExtend(self, contour): area = self.__CalculateArea(contour) boundingBox = self.__CalculateBoundingBox(contour) return area / (boundingBox[2] * boundingBox[3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def life_insurance_to_recive_total(self):\n pass", "def extendability(self):\n self._extendability = 0.50 * self.ANA - 0.50 * self.DCC + 0.50 * self.MFA + 0.50 * self.NOP\n return round(self._extendability, 5)", "def calculate(self):", "def calculate(self):\n pass", "def update_points(self):\n #Calculate Upper Section\n total = 0\n for box in self._upper_section:\n total += box.points\n self._upperSum = total\n\n if total >= 63:\n self._bonus = 35\n total += 35\n self._upperTotal = total\n\n # Calculate Lower Section\n total = 0\n for box in self._lower_section:\n total += box.points\n\n if self.get_box(\"Yahtzee\").points > 0:\n total = total + (self._yahtzee_count - 1) * 100 # Yahtzee Bonus\n\n self._lowerTotal = total\n\n #Total Points\n self._grandTotal = self._upperTotal + self._lowerTotal", "def estimate_incumbent(self, startpoints):\n\n pass", "def calculate(self):\r\n pass", "def calibration(self) -> int:", "def calculate(self):\r\n\r\n pass", "def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0", "def extras_total(self):\n total = self.wides + self.no_balls + self.byes + self.leg_byes\n return total", "def calc_calories(gpx_track, wt = 175, activity='Run'):", "def calc_intertie_offset_generation (self):\n self.generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n dist = self.comp_specs['distance to community']\n self.annual_transmission_loss = \\\n 1 - (\n (1- (self.comp_specs['transmission loss per mile']/ 100.0))\n ** dist)\n self.intertie_offset_generation = \\\n self.generation * (1 + self.annual_transmission_loss)\n\n gen_eff = self.intertie_generation_efficiency\n self.intertie_offset_generation_fuel_used = \\\n self.intertie_offset_generation / gen_eff\n #~ print 'self.proposed_generation',self.proposed_generation\n #~ print con", "def overall_reduction(self):\n return 84", "def private_pension_total(self):\n pass", "def total_steps(self) -> global___Expression:", "def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its", "def calc_base_eff_and_infl(level):\n return 2 + (level - 1)", "def extend (self) :\n return (self.x_min, self.x_max, self.y_min, self.y_max)", "def calculate_activities(self):\n # Sleep\n sleep = self.sleep_hours * 0.95\n\n # Work\n if self.work_intensity == self.INTENSITY_LOW:\n work_factor = 1.5\n elif self.work_intensity == self.INTENSITY_MEDIUM:\n work_factor = 1.8\n else:\n work_factor = 2.2\n work = self.work_hours * work_factor\n\n # Sport (entered in hours/week, so we must divide)\n if self.sport_intensity == self.INTENSITY_LOW:\n sport_factor = 4\n elif self.sport_intensity == self.INTENSITY_MEDIUM:\n sport_factor = 6\n else:\n sport_factor = 10\n sport = (self.sport_hours / 7.0) * sport_factor\n\n # Free time\n if self.freetime_intensity == self.INTENSITY_LOW:\n freetime_factor = 1.3\n elif self.freetime_intensity == self.INTENSITY_MEDIUM:\n freetime_factor = 1.9\n else:\n freetime_factor = 2.4\n freetime = self.freetime_hours * freetime_factor\n\n # Total\n total = (sleep + work + sport + freetime) / 24.0\n return decimal.Decimal(str(total)).quantize(TWOPLACES)", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def intensity(self) -> int:", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = 
self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def calculate_output(self):", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def patrimony_total(self):\n pass", "def _compute_ingreso_subtotal(self):\n for sub in self:\n sub.recurring_total = sum(\n line.ingreso for line in sub.animales_ids)", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def calc_points_office(self):\n if 'cong' in args.exp:\n if self.cnt_office >= 1:\n be = [0] * 8\n be += [1 if x == 'O' else 0 for x in self.b[ 0: 5]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[ 5:10]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[10:15]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[15:20]]\n be += [0] * 8\n max_points = 0\n vptab_office = (\n (0, 0, 0, 0, 0, 0),\n (0, 0, 1, 3, 6, 10),\n (0, 1, 3, 6, 10, 15),\n (0, 2, 5, 9, 14, 20),\n (0, 3, 7, 12, 18, 25),\n (0, 4, 9, 15, 22, 30)\n )\n for bi in range(20):\n if self.b[bi] == 'U':\n be[(bi // 5 + 1) * 7 + bi % 5 + 1] = 1\n total_visited = set()\n points = 0\n for i in range(8, 34):\n if be[i] and i not in total_visited:\n visited = floodfill(be, i)\n total_visited |= visited\n adj = min(len(visited), 5)\n for vi in visited:\n points += vptab_office[adj][self.f[(vi // 7 - 1) * 5 + vi % 7 - 1]]\n if points > max_points:\n max_points = points\n be[(bi // 5 + 1) * 7 + bi % 5 + 1] = 0\n return max_points\n else:\n if self.cnt_office >= 2:\n be = [0] * 8\n be += [1 if x == 'O' else 0 for x in self.b[ 0: 5]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[ 5:10]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[10:15]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[15:20]]\n be += [0] * 8\n points = 0\n total_visited = set()\n vptab_office = (\n (0, 0, 0, 0, 0, 0),\n (0, 0, 1, 3, 6, 10),\n (0, 1, 3, 6, 10, 15),\n (0, 2, 5, 9, 14, 20),\n (0, 3, 7, 12, 18, 25),\n (0, 4, 9, 15, 22, 30)\n )\n for i in range(8, 34):\n if be[i] and i not in total_visited:\n visited = floodfill(be, i)\n total_visited |= visited\n adj = min(len(visited), 5)\n for vi in visited:\n points += vptab_office[adj][self.f[(vi // 7 - 1) * 5 + vi % 7 - 1]]\n return points\n 
return 0", "def advancedStats():", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def extend(self,data):\n n = float(len(data))\n if n == 0:\n return self\n M2 = 0\n M3 = 0\n M4 = 0\n mean = 0\n vmin = None\n vmax = None\n for x in data:\n mean += x/n \n if vmin is None:\n vmax = x\n vmin = x\n if x < vmin:\n vmin = x\n if x > vmax:\n vmax = x\n for x in data:\n d = x-mean\n M2 += (d**2)\n M3 += (d**3)\n M4 += (d**4)\n x = LiveStat(self.name)\n x.vmin = vmin\n x.vmax = vmax\n x.vmean = mean\n x.vm2 = M2\n x.vm3 = M3\n x.vm4 = M4\n x.vcount = int(n)\n x.vcountsq = x.vcount**2\n x.dirty = True\n self.merge(x)\n return self", "def extend(self, step):\n self.timesteps.extend(step.timesteps)\n self.masks.extend(step.masks)\n self.x.extend(step.x)\n self.y.extend(step.y)\n self.i.extend(step.i)\n self.j.extend(step.j)\n self.end_time = step.end_time\n self.times = np.arange(self.start_time, self.end_time + self.step, self.step)\n self.u = np.concatenate((self.u, step.u))\n self.v = np.concatenate((self.v, step.v))\n for attr in self.attributes.keys():\n if attr in step.attributes.keys():\n self.attributes[attr].extend(step.attributes[attr])", "def _base(self, **kwargs):\n self._add_irregularities(**kwargs)\n self._normalize(offset=kwargs.get(\"offset\"))", "def ComputeEnergyConsumption(self):\r\n pass", "def calc(self):\n np = 0\n for cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np", "def calc_excess_energy (self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy", "def total_calories(self, weight=75):\n return weight * 0.862911 * self.total_distance", "def __used(self):\n tot=0\n assign={}\n for c in self.assigned :\n if not assign.has_key(c.start) :\n assign[c.start]=c.end\n tot+=c.end-c.start+1\n return tot", "def calculate_vars(self):\n pass", "def expandcal(self):\n ind=np.zeros(self.spec.shape[0]).astype(int)\n for k in range(self.nscan):\n ind[self.getscanind(k)]=k\n 
ind[self.getcalind(k)]=k\n return ind", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def calc_net_generation_wind (self):\n self.net_generation_wind = self.generation_wind_proposed - \\\n self.transmission_losses -\\\n self.excess_energy\n #~ print 'self.net_generation_wind',self.net_generation_wind", "def get_total(self):\n total = super().get_total()\n\n if self.qty < 10:\n total += 3\n\n return total", "def system_capex(self):\n\n topside = self.config[\"offshore_substation_topside\"][\"unit_cost\"]\n substructure = self.config[\"offshore_substation_substructure\"][\"unit_cost\"]\n mooring = self.config[\"offshore_substation_substructure\"][\"mooring_cost\"]\n\n return self.num_substations * (topside + substructure + mooring)", "def calc_stat_values(self):", "def calcAllPhotoCurrents(self):\n pass", "def GOAL_TOTAL() -> int:\n return 21", "def extend(self):\n # -1 in the segments means that starts counting in the end of the list\n self.add_segment(self.segments[-1].position())", "def _get_fitness_increes(self, fit_list, func):\n inc = 0\n for index in range(1, len(fit_list)-1):\n inc += fit_list[index] - fit_list[index-1]\n\n return inc", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def extend_image(self):\n IJ.run(self.image, 'Select All', '')\n IJ.run(self.image, 'Copy', '')\n width = self.image.getWidth()\n height = int(self.image.getHeight() + self.extend_full)\n new_image = IJ.createImage(\n IMP.getTitle(), '32-bit black', width, height, 1)\n new_image.setRoi(0, 0, self.image.getWidth(), self.image.getHeight())\n IJ.run(new_image, 'Paste', '')\n IJ.run(new_image, 'Enhance Contrast', 'saturated=0.35')\n IJ.run(new_image, 'Select None', '')\n cal = Calibration(self.image)\n cal.pixelWidth = self.dispersion\n cal.xOrigin = self.offset\n new_image.setCalibration(cal)\n self.image = new_image\n return new_image", "def sum_over_energy(self):\n raise NotImplementedError(\"MapBase.sum_over_energy()\")", "def calculate(self, limit):\r\n pass", "def extend_pos(self, start: int, end: int) -> None:", "def import_grid(self):\n return max(0, self.current_energy_consumed - self.current_energy_produced)", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def calc(self):\n return None", "def calculate(self, limit):\n pass", "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def _generateTotalMask(self):\r\n\r\n mask 
= np.zeros(self.altPosMap.shape)\r\n\r\n for patch in self.finalPatches.itervalues():\r\n mask = mask + patch.array.astype(np.float)\r\n\r\n mask = ni.binary_closing(mask,\r\n structure=np.array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]),\r\n iterations=self.params['borderWidth'])\r\n\r\n return mask.astype(np.int8)", "def _UpdateCriteria(self):\n grad = self.traj.grad[-1]\n disp = self.traj.coords[-1] - self.traj.coords[-2]\n self.delta_e = self.traj.energy[-1] - self.traj.energy[-2]\n self.grad_max = numpy.amax(grad)\n self.disp_max = numpy.amax(disp)\n self.grad_rms = math.sqrt(numpy.mean(grad**2))\n self.disp_rms = math.sqrt(numpy.mean(disp**2))", "def extend_info(self):\n return self._extend_info", "def maCruise(self):\n return .77", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def _extend_p_range(self) -> np.ndarray:\n mut_influence = self._perceive(self.p_range) \n neighborless = np.diagonal(mut_influence)\n return self.proxim_bounds[-1]*neighborless + self.p_range", "def nze(self) -> int:", "def nze(self) -> int:", "def num_points_sweep(self, start, stop, step):\r\n return(abs((stop - start)//step) + 1)", "def get_cape(temp,pres,dewpt,hght,startp,startt,startdp,totalcape=False): \n\n # Check units\n # Init temp is startt in C, Init dew point is stwrtdp,\n # pressure levels are in hPa \n temp = temp - 273.15 # convert temperature to celsius\n dewpt = dewpt - 273.15 # convert dewpoint to celsius\n pres = pres/100 # convert pressure to hPa\n \n \n inds = np.where( (pres < startp) ) \n tmp = pres[inds]\n del pres\n #pres = tmp[::-1]\n pres = tmp[:]\n del tmp \n startp = startp/100\n \n tmp = temp[inds]\n del temp\n #temp = tmp[::-1]\n temp = tmp[:]\n del tmp \n\n tmp = dewpt[inds]\n del dewpt\n #dewpt = tmp[::-1]\n dewpt = tmp[:]\n del tmp \n\n tmp = hght[inds]\n del hght\n #hght = tmp[::-1]\n hght = tmp[:]\n del tmp \n\n \n # Get Sub-LCL traces \n presdry,tempdry,tempiso=dry_ascent(startp,startt-degCtoK,startdp-degCtoK) \n \n\n # make lcl variables explicit\n P_lcl=presdry[-1]\n T_lcl=tempdry[-1]\n\n # Now lift a wet parcel from the intersection point\n # preswet=linspace(P_lcl,100,101)\n preswet,tempwet=moist_ascent(P_lcl,T_lcl)\n\n # tparcel is the concatenation of tempdry and \n # tempwet, and so on.\n \n tparcel=np.concatenate((tempdry,tempwet[1:]))\n pparcel=np.concatenate((presdry,preswet[1:]))\n\n # Interpolating the environmental profile onto the \n # parcel pressure coordinate\n # tempenv=interp(preswet,pres[::-1],temp[::-1])\n ## NEW, for total column:\n tempenv=interp(pparcel,pres[::-1],temp[::-1])\n\n\n # now solve for the equlibrium levels above LCL\n # (all of them, including unstable ones)\n # eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])\n # NEW, for total column:\n # On second thought, we don't really want/need\n # any equilibrium levels below LCL\n # eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])\n # This is equivalent to the old statement :\n eqlev,stab=solve_eq(pparcel[pparcel<=P_lcl][::-1],\\\n (tparcel-tempenv)[pparcel<=P_lcl][::-1])\n\n aa = tparcel-tempenv\n\n # Sorting index by decreasing pressure\n I=np.argsort(eqlev)[::-1]\n eqlev=eqlev[I]; stab=stab[I]\n\n # temperatures at the equilibrium level\n # tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])\n ## NEW, for total column:\n tempeq=interp(eqlev,pparcel[::-1],tparcel[::-1])\n\n # This helps with debugging\n # for ii,eq in enumerate(eqlev):\n # print \"%5.2f %5.2f 
%2d\"%(eq,tempeq[ii],stab[ii])\n\n # need environmental temperature at LCL\n tenv_lcl=interp(P_lcl,pparcel[::-1],tempenv[::-1])\n\n isstab=np.where(stab==1.,True,False)\n unstab=np.where(stab==1.,False,True) \n\n if eqlev.shape[0]==0:\n # no unstable layers in entire profile\n # because the parcel never crosses the tenv\n P_lfc=float('NaN')\n P_el=float('NaN')\n elif T_lcl>tenv_lcl:\n # check LCL to see if this is unstable\n P_lfc=P_lcl\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n elif eqlev.shape[0]>1:\n # Parcel is stable at LCL so LFC is the \n # first unstable equilibrium level and \n # \"EQ\" level is the first stable equilibrium \n # level\n P_lfc=eqlev[unstab][0]\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n else:\n # catch a problem... if there is only\n # one eqlev and it's stable (this is \n # unphysical), then it could be a vertical\n # resolution thing. This is a kind of \n # \"null\" option\n try:\n\t P_el=eqlev[isstab][0]\n P_lfc=eqlev[isstab][0]\n except:\n\t P_el=eqlev[unstab][0]\n P_lfc=eqlev[unstab][0]\t\n\t\n if np.isnan(P_lfc):\n return P_lcl,P_lfc,P_el,0,0\n\n # need to handle case where dwpt is not available \n # above a certain level for any reason. Most simplest \n # thing to do is set it to a reasonably low value; \n # this should be a conservative approach!\n \n #dwpt=dewpt.copy().soften_mask()\n [inds] = np.where(np.isnan(dewpt))\n dwpt = dewpt\n dwpt[inds] = dwpt.min()\n \n # raise ValueError\n #if dwpt[(pres>=P_el).data*(pres<P_lfc).data].mask.any():\n # print \"WARNING: substituting dwpt.min() for masked values of DWPT in this sounding\"\n #dwpt[dwpt.mask]=dwpt.min()\n # dwptenv=interp(preswet,pres[::-1],dwpt[::-1])\n # NEW:\n\n dwptenv=interp(pparcel,pres[::-1],dwpt[::-1])\n\n\n \n #if hght[(pres>=P_el).data].mask.any():\n # raise NotImplementedError, \"TODO: Implement standard atmosphere to substitute missing heights\"\n # hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])\n # NEW:\n hghtenv=interp(pparcel,pres[::-1],hght[::-1])\n \n\n # Areas of POSITIVE Bouyancy\n # cond1=(tempwet>=tempenv)*(preswet<=P_lfc)*(preswet>P_el)\n # NEW:\n cond1=(tparcel>=tempenv)*(pparcel<=P_lfc)*(pparcel>P_el)\n # Areas of NEGATIVE Bouyancy\n # cond2=(tempwet<tempenv)*(preswet<=P_lcl)*(preswet>P_el)\n # NEW:\n if totalcape:\n cond2=(tparcel<tempenv)*(pparcel>P_el)\n else:\n cond2=(tparcel<tempenv)*(pparcel>P_lfc)\n # Do CAPE calculation\n # 1. Virtual temperature of parcel... 
remember it's saturated above LCL.\n # e_parcel=SatVap(tempwet)\n # Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)\n # e_env=SatVap(dwptenv)\n # Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)\n # NEW:\n e_parcel=SatVap(tparcel)\n Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)\n e_env=SatVap(dwptenv)\n Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)\n\n CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/Tv_env[cond1],hghtenv[cond1])\n CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/Tv_env[cond2],hghtenv[cond2])\n\n return P_lcl,P_lfc,P_el,CAPE,CIN", "def calculate_fitness(self):\n self.fitness = distMat[self.tour[-1]][self.tour[0]]\n for i in range(self.dimension):\n self.fitness += distMat[self.tour[i]][self.tour[i - 1]]", "def get_correct_lap_count(self):", "def additional_derivatives(self, increment_filter, k):\n ######################################################################\n # derivatives for saturated liquid at hot side outlet equation\n if self.subcooling.val is False:\n o1 = self.outl[0].to_flow()\n self.jacobian[k, 2, 1] = -dh_mix_dpQ(o1, 0)\n self.jacobian[k, 2, 2] = 1\n k += 1", "def tot(self):\n return self.det + self.out + self.faint + self.late", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def apply_action(self, action, time, timeSta):\n intersection = self.CONFIG['intersection']\n phase_of_interest = self.CONFIG['phase_of_interest']\n total_phases = len(self.CONFIG['phase_duration'])\n green_duration = self.CONFIG['phase_duration'][phase_of_interest - 1]\n\n # check the time duration is correct\n pdur = doublep()\n pcmax = doublep()\n pcmin = doublep()\n ECIGetDurationsPhase(self.CONFIG['intersection'], self.CONFIG['phase_of_interest'], timeSta, pdur, pcmax, pcmin)\n poi_duration = int(pdur.value())\n\n if self.extendedalready:\n print(\"int {} already extened for {} seconds, apply {} extension on top of it\".format(self.CONFIG['intersection'], self.extended, action))\n action = action + self.extended\n # clip the action to the legal limit\n if action >20:\n action =20\n if action <-20:\n action = -20\n else:\n print(\"int {} has no assigned extension, the phase of interest is {} sec, extending {} sec extension\".format(self.CONFIG['intersection'], poi_duration, action))\n\n\n if poi_duration != green_duration + self.extended:\n print(\"\\n\\n ERROR: phase duration already changed from {} to {} and self.extended value is {}\\n\\n\".format(green_duration, poi_duration, self.extended))\n\n phasetime = time - ECIGetStartingTimePhase(intersection)\n currentPhase = ECIGetCurrentPhase(intersection)\n if currentPhase == phase_of_interest:\n # check if the action is legal\n remaining_green = self._get_toNearGreenPhase(currentPhase, phasetime, 0)\n if remaining_green>=0 and action + remaining_green < 0:\n action = -remaining_green\n ECIChangeTimingPhase(intersection, phase_of_interest, green_duration + action, timeSta)\n if action != 0:\n self.extendedalready = 1\n else:\n self.extendedalready = 0\n self.extended = action\n\n print(\"------- {} Extend start here ----------\".format(intersection))\n print(\"Extended at time: {}\".format(time))\n print(\"Extended length: \" + str(action) + \" sec\")", "def dovetails(self):\n return self.dovetails_L + self.dovetails_R", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def get_lift(self):\n return 0.0", "def 
subbandwidth(self):", "def get_counters(self):\n return (self.initial, self.final)", "def extend(self, current):\n for arc in self.agenda:\n try:\n self.agenda.append(arc.get_extended(current))\n except ValueError:\n continue", "def offense(self):\n #return self.stats.strength + self.stats.level\n return self.stats.offense", "def get_inc(self):\n return self.inc_min + self.inc_ * self.inc_range", "def calc_heat_sum(tmin, tmax, tbase=6.0):\n tmax = min(tmax, 30.0)\n tx = (tmin + tmax) / 2.0\n return max(tx - tbase, 0.0)", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def superposition(self):\r\n superpos_array = [[0,0,0],[0,0,0],[0,0,0]]\r\n #check normalised:\r\n n = sum(self.block_weights)\r\n if n != 1:\r\n #normalise here if required\r\n self.block_weights = [x/n for x in self.block_weights]\r\n o = self.block_opts\r\n w = self.block_weights\r\n for i in range(TILE_SIZE):\r\n for j in range(TILE_SIZE):\r\n for k in range(len(o)):\r\n superpos_array[j][i] += 254*get_blocks(o[k])[j][i]*w[k] \r\n \r\n return superpos_array\r\n \r\n #propgate changes out\r", "def _sum_g_i(self) -> float:\n elems = self.composition.get_el_amt_dict()\n\n if self.interpolated:\n sum_g_i = 0\n for elem, amt in elems.items():\n g_interp = interp1d(\n [float(t) for t in G_ELEMS.keys()],\n [g_dict[elem] for g_dict in G_ELEMS.values()],\n )\n sum_g_i += amt * g_interp(self.temp)\n else:\n sum_g_i = sum(amt * G_ELEMS[str(self.temp)][elem] for elem, amt in elems.items())\n\n return sum_g_i", "def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True", "def calculatePieces(self):\n pass;", "def extent(self, start=None, finish=None):\n start, finish = self.bounds(start, finish)\n try:\n return finish - start + 1\n except TypeError:\n return None", "def calculate(self) -> float:", "def num_deriv_exterior(\n cal: Calibration, cpar: ControlPar, dpos: float, dang: float, pos: vec3d\n):\n var = [\n cal.ext_par.x0,\n cal.ext_par.y0,\n cal.ext_par.z0,\n cal.ext_par.omega,\n cal.ext_par.phi,\n cal.ext_par.kappa,\n ]\n x_ders = np.zeros(6)\n y_ders = np.zeros(6)\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n xs, ys = img_coord(pos, cal, cpar.mm)\n\n for pd in range(6):\n step = dang if pd > 2 else dpos\n var[pd] += step\n\n if pd > 2:\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n xpd, ypd = img_coord(pos, cal, cpar.mm)\n x_ders[pd] = (xpd - xs) / step\n y_ders[pd] = (ypd - ys) / step\n\n var[pd] -= step\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n return (x_ders, y_ders)" ]
[ "0.56703246", "0.5525739", "0.5475888", "0.5367172", "0.5338628", "0.53284657", "0.53263986", "0.53008175", "0.529843", "0.5273235", "0.52313775", "0.5210509", "0.5181706", "0.51630366", "0.5138338", "0.51348615", "0.50815606", "0.50513184", "0.504204", "0.50351846", "0.50212395", "0.49979383", "0.49934825", "0.49804574", "0.49557364", "0.49522033", "0.49494788", "0.49321595", "0.49305394", "0.49235284", "0.49203852", "0.49200892", "0.4904673", "0.49041903", "0.4887036", "0.48705676", "0.48686856", "0.4867793", "0.48665982", "0.48463285", "0.48251137", "0.4823975", "0.48160216", "0.4807867", "0.48033127", "0.48027894", "0.47756755", "0.47709742", "0.4768809", "0.4767683", "0.4763976", "0.47611982", "0.47611982", "0.47611982", "0.47611982", "0.47586063", "0.47584483", "0.4751661", "0.47487426", "0.4745178", "0.4736124", "0.47357717", "0.4734068", "0.47269398", "0.4725467", "0.47176817", "0.47168037", "0.470572", "0.47021192", "0.47016618", "0.4700897", "0.4690974", "0.46874326", "0.46874326", "0.46839857", "0.46796653", "0.4678907", "0.46776173", "0.46629754", "0.4658822", "0.46578723", "0.46533272", "0.46518672", "0.46484178", "0.46457437", "0.46454832", "0.4644915", "0.46443805", "0.46433857", "0.46378294", "0.46363378", "0.4625479", "0.46240053", "0.4623706", "0.46236593", "0.4623003", "0.46225446", "0.46204683", "0.46155432", "0.46119586" ]
0.6228183
0
Check if a curve is convex or not.
def __IsConvex(self, contour): return cv2.isContourConvex(contour)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convex(self):\n # Convex has positive curvature (2nd derivative)\n # f\"(x) = 2a, so a > 0 corresponds to convex\n return (self.a > 0)", "def _check_curve(layer: ogr.Layer) -> None:\n # Check if the feature geometry is polygonal:\n feature_defn = layer.GetLayerDefn()\n layer.ResetReading()\n feature = layer.GetNextFeature()\n while feature is not None:\n geom = feature.GetGeometryRef()\n name_wkt = geom.ExportToWkt()\n\n # Approximate a curvature by a polygon geometry:\n if 'curv' in name_wkt.lower():\n linear_geom = geom.GetLinearGeometry()\n new_feature = ogr.Feature(feature_defn)\n new_feature.SetGeometryDirectly(linear_geom)\n layer.CreateFeature(new_feature)\n layer.DeleteFeature(feature.GetFID())\n\n feature = layer.GetNextFeature()", "def convex(self, *args, **kwargs) -> Any:\n pass", "def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False", "def isSetCurve(self):\n return _libsbml.GeneralGlyph_isSetCurve(self)", "def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right", "def isConvex(data, boundaryPointsDict, triangleDict, approximation ,demo = True):\n\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= epsilon:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n if demo:\n plotDemo(data, point, bdrPntIdx)\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True", "def isSetCurve(self):\n return _libsbml.ReferenceGlyph_isSetCurve(self)", "def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0", "def isConvexApproximate(data, boundaryPointsDict, triangleDict, approximation, tolerance):\n outliersAllowed = int(np.floor(tolerance * len(list(boundaryPointsDict.keys()))))\n\n outliersCount = 0\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after looping 
all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= approximation:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n outliersCount += 1\n if outliersCount > outliersAllowed:\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True", "def isSetCurve(self):\n return _libsbml.ReactionGlyph_isSetCurve(self)", "def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True", "def isSetCurve(self):\n return _libsbml.SpeciesReferenceGlyph_isSetCurve(self)", "def is_polygon_convex(polygon):\n c = center_of_mass_polygon(polygon)\n for i in range(-1, len(polygon) - 1):\n p0 = polygon[i]\n p1 = polygon[i - 1]\n p2 = polygon[i + 1]\n v0 = subtract_vectors(c, p0)\n v1 = subtract_vectors(p1, p0)\n v2 = subtract_vectors(p2, p0)\n a1 = angle_smallest_vectors(v1, v0)\n a2 = angle_smallest_vectors(v0, v2)\n if a1 + a2 > pi:\n return False\n return True", "def is_curve(geo):\n geo = geo.strip().upper()\n\n for a_geo_type_in_curve_geo_types_list in CURVE_TYPES:\n if geo.startswith(a_geo_type_in_curve_geo_types_list):\n return True\n\n continue\n\n return False", "def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False", "def is_point_on_curve(a, b, p, x, y):\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) \\\n or (x == None and y == None)\n\n if x == None and y == None:\n return True\n\n lhs = (y * y) % p\n rhs = (x*x*x + a*x + b) % p\n on_curve = (lhs == rhs)\n\n return on_curve", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid", "def is_convex(reg, abs_tol=ABS_TOL):\n if not is_fulldim(reg):\n return True\n if len(reg) == 0:\n return True\n outer = envelope(reg)\n if 
is_empty(outer):\n # Probably because input polytopes were so small and ugly..\n return False, None\n Pl, Pu = reg.bounding_box\n Ol, Ou = outer.bounding_box\n bboxP = np.hstack([Pl, Pu])\n bboxO = np.hstack([Ol, Ou])\n if (\n sum(abs(bboxP[:, 0] - bboxO[:, 0]) > abs_tol) > 0 or\n sum(abs(bboxP[:, 1] - bboxO[:, 1]) > abs_tol) > 0):\n return False, None\n if is_fulldim(outer.diff(reg)):\n return False, None\n else:\n return True, outer", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def point_in_poly(x_point: float, y_point: float) -> bool:\n\n # Semi-F47 extended states all devices should be able to ride out a sag of up to 1 cycle.\n if x_point <= 1:\n return False\n\n point = shapely.geometry.Point(x_point, y_point)\n return POLYGON.contains(point) or POLYGON.intersects(point)", "def ispolygonXY(a):\n return ispolygon(a) and isXYPlanar(a)", "def checkconvexity(self): # 3\n res = self.__obj.checkconvexity()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def check_convexity(hull, used_pivots):\n for instance in used_pivots:\n if not check_inside_hull(hull, instance):\n return False\n return True", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def below_curve(cls, curve):\n def _shape(site):\n x, y = site.pos\n return y < curve(x)\n return Shape(_shape)", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex():\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def convexify(domain):\n\n if isinstance(domain, isl.BasicSet):\n return domain\n\n dom_bsets = domain.get_basic_sets()\n if len(dom_bsets) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n domain = domain.coalesce()\n\n dom_bsets = domain.get_basic_sets()\n if len(domain.get_basic_sets()) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n dom_bsets = domain.get_basic_sets()\n assert len(dom_bsets) > 1\n\n print(\"PIECES:\")\n for dbs in dom_bsets:\n print(\" %s\" % (isl.Set.from_basic_set(dbs).gist(domain)))\n raise NotImplementedError(\"Could not find convex representation of set\")", "def func_curvature(self):\n return u.Curvature.CONVEX", "def test_spheroid_convexity(spheroid_convex_fixture):\n assert(spheroid_convex_fixture.convex_p() == pytest.approx(1.0))\n assert(spheroid_convex_fixture.linear_p() == pytest.approx(0.0))", "def ispolygon(a):\n return ispoly(a) and dist(a[0],a[-1]) < epsilon", "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if 
not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True", "def point_outside_conus(pt):\n return not pt.within(CONUS[\"poly\"]) and pt.distance(CONUS[\"poly\"]) > 0.001", "def is_posdef(X):\n return np.min(np.linalg.eigvals(X)) > 0", "def continuous(self, x, y, X, Y):\n hor = fabs(x - X) == SSIZE and y == Y\n ver = fabs(y - Y) == SSIZE and x == X\n return (hor and not ver) or (ver and not hor)", "def convex(points):\r\n if isinstance(points, np.ndarray):\r\n points = np.unique(points, axis=0)\r\n else:\r\n pts = []\r\n points = [pts.append(i) for i in points if i not in pts] # Remove duplicates\r\n del pts\r\n if len(points) <= 1:\r\n return points\r\n # Build lower hull\r\n lower = []\r\n for p in points:\r\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\r\n lower.pop()\r\n lower.append(p)\r\n # Build upper hull\r\n upper = []\r\n for p in reversed(points):\r\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\r\n upper.pop()\r\n upper.append(p)\r\n #print(\"lower\\n{}\\nupper\\n{}\".format(lower, upper))\r\n return np.array(lower[:-1] + upper) # upper[:-1]) # for open loop\r", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def is_elliptic(self):\n if self.is_irreducible():\n return self._info['elliptic']\n else:\n return False", "def is_pos_semidef(X, positive_definite=False):\n if not positive_definite:\n return np.all(np.linalg.eigvals(X) >= 0)\n else:\n return np.all(np.linalg.eigvals(X) > 0)", "def is_positive_definite(x):\n return np.all(np.linalg.eigvals(x) > 0)", "def isPointCollide(self, point):\n return self.p[0] <= point <= self.p[2]", "def isValidPcbShape(g):\n return g.GetShape() != pcbnew.S_SEGMENT or g.GetLength() > 0", "def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def is_polycyclic(self):\n return self.is_solvable", "def __draw_curve(self, points):\n x_pts = []\n y_pts = []\n curvex = []\n curvey = []\n self.debug += 1\n for point in points:\n x_pts.append(point[0])\n y_pts.append(point[1])\n curve = scipy.interpolate.interp1d(x_pts, y_pts, 'cubic')\n if self.debug == 1 or self.debug == 2:\n for i in np.arange(x_pts[0], x_pts[len(x_pts) - 1] + 1, 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n else:\n for i in np.arange(x_pts[len(x_pts) - 1] + 1, x_pts[0], 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n return curvex, curvey", "def isInside(self, point):\n # we rotate back the point to the frame parallel to the axis of the ellipse\n rotatedPoint = self.rotatePoint(point)\n # we check if each point is inside the associated liquid drop\n return ((rotatedPoint[:, :, 0]/self.axisA[:, None])**2 + (rotatedPoint[:, :, 1]/self.axisB[:, None])**2 < 1)", "def is_pos_def(x):\n rtol = 1e-05\n atol = 1e-08\n return np.all(np.linalg.eigvals(x) > 0) and np.allclose(\n x, x.T, rtol=rtol, atol=atol\n )", "def point_on_curve(self, P):\n x, y = modp(self.p, P.x, P.y)\n lhs = y ** 2\n rhs = x ** 3 + x * self.a + self.b\n return lhs == rhs", "def 
convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def has_point(self, point: AbstractPoint) -> bool:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n factor_a, factor_b = plane.calculate_point_factors(point)\n if factor_a is not None and factor_b is not None:\n return (0.0 <= factor_a <= 1.0 and 0.0 <= factor_b <= 1.0 and\n (factor_a + factor_b) <= 1.0)\n\n return False", "def _is_circle_contractive(self,r,tol):\n B=np.diag(self.b)\n M=np.dot(B,self.A)+np.dot(self.A.T,B)-np.outer(self.b,self.b)\n X=M+B/r\n v,d=np.linalg.eig(X)\n if v.min()>-tol:\n return 1\n else:\n return 0", "def check_ccf(lags, coeffs, confidence, threshold=0.95):\n in_band = np.sum(np.abs(coeffs) < confidence) / len(coeffs)\n return in_band >= threshold, in_band", "def _handle_curve_collider(self, collider):\n point = collider.curve.closestPoint(self.transform.getTranslation(ws=True))\n collision_vector = pm.datatypes.Vector(point - self.transform.getTranslation(ws=True))\n point = point - collision_vector.normal() * 2.05 * self.transform.getAttr('sx')\n if collision_vector.length() <= self.transform.getAttr('sx') * 2:\n if collider in self.parent.colliders:\n self.parent.on_collide(collider, point, collision_vector.length())\n else:\n self.parent.colliders.append(self)\n self.parent.on_collide_enter(collider, point, collision_vector.length())\n # end if\n else:\n if collider in self.parent.colliders:\n self.parent.colliders.remove(self)\n self.parent.on_collide_exit(collider, point)\n # end if\n # end if", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def left_of_curve(cls, curve):\n def _shape(site):\n x, y = site.pos\n return x < curve(y)\n return Shape(_shape)", "def ispolycw(x, y):\n\n l = len(x)\n if l < 
3:\n raise ValueError(\"ispolycw::X length is less than 3\")\n\n if len(y) < 3:\n raise ValueError(\"ispolycw::Y length is less than 3\")\n\n if l != len(y):\n raise ValueError(\"ispolycw::Non-equal sized arrays\")\n\n s = 0.0\n for k in range(0, l):\n kn = (k + 1) % l\n\n s += (x[kn] - x[k]) * (y[kn] + y[k])\n\n return (s > 0.0, 0.5*s) # CW flag, signed area of the polygon", "def _inside_isheating(ci, hi, co, ho) -> bool:\n return abs(298.15 - ci.T) - abs(hi.T - 298.15) > 0", "def spheroid_convex():\n problem, representation, initial_sample = spheroid_sample()\n\n return exploratory.ELAConvexity(problem, representation, design_individuals=initial_sample)", "def has_vertex(self, x, y):\n\n return min(x, y) > 0 and x <= self.width and y <= self.height", "def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0", "def check_for_include(self, obj):\n if obj.type == \"CURVE\":\n o = []\n for i in obj.data.splines:\n o.append(i.type == \"POLY\" or i.type == \"BEZIER\")\n return False not in o\n elif obj.type in (\"FONT\", \"MESH\"):\n return True\n else:\n return False", "def is_solution(self, csp):\n return self.is_consistent(csp.get_constraints()) and self.is_complete(csp.get_variables())", "def isPointCollideWithMargin1(self, point):\n return self.p[0]-Vect(1, 1) <= point <= self.p[2]", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def ppl_check_non_negative_cone(C):\n if not isinstance(C, ppl.C_Polyhedron):\n raise ValueError('C must be a polyhedron in the right ambient space')\n\n n = C.space_dimension()\n if not C.contains(ppl.C_Polyhedron(ppl_zero_point(n))):\n raise ValueError('the cone does not contain zero')\n if not ppl_positive_cone(n).contains(C):\n raise ValueError('C must be a subcone of the non-negative cone')\n for g in C.generators():\n if g.is_point() and not g.is_equivalent_to(ppl_zero_point(n)):\n raise ValueError('should have only zero as vertices'.format(g))\n if g.is_line():\n raise ValueError('the cone contains a line')", "def isOnInteriorSide(self, v):\n n = self.normalVect()\n return n.dotProduct(vector(self.vertices[0]) - vector(v)) > 0", "def checks(self, poly_fit, poly_fitx, poly_fity):\n if self.best_fit is not None:\n if not (np.abs(self.best_fit-poly_fit) <\n np.array([0.001, 1, 500])).all():\n return False\n if self.bestx is not None:\n if np.mean(np.abs(self.bestx-poly_fitx)) > 200:\n return False\n\n return True", "def _checkPoly1Contour(poly):\n # if len(poly) == 0:\n # logger.warning(\"Polygon is empty. 
Accepting with warning.\")\n\n if len(poly) > 1:\n msg = \"Error: Current version of eval_seg cannot handle polygons with multiple contours.\"\n logger.error(msg)\n logger.error(\"Poly: %s\" % poly)\n raise ValueError(msg)", "def ispoly(a):\n return isinstance(a,list) and len(a) > 2 and \\\n len(list(filter(lambda x: not ispoint(x),a))) == 0", "def can_fix_intersection(self, segment):\n\n points = segment.points\n points = [points[1], points[2], points[3], points[2], points[1], points[0]]\n path = create_path(points)\n layer = GSLayer()\n layer.paths.append(path)\n\n if layer.paths[0].insertNodeWithPathTime_(2.5) is None:\n return False\n for segment in layer.paths[0].segments[:-1]:\n # We need to check only curve segments which consist of four points.\n if len(segment.points) == 4:\n s_t = self.triangle_error_of(segment.points, do_round=True)\n if s_t is not None:\n points = points2vectors(segment.points)\n ok = False\n for s, t in self.calculate_s_t_candidates(points, s_t):\n if self.try_update_points(points, s, t) is not None:\n ok = True\n break\n if not ok:\n return False\n return True", "def contains_point(self, point):\n\t\tthreshold = 0.6\n\t\tx = point[0]\n\t\ty = point[1]\n\t\tif (x >= (self.xmin - threshold) and x <= (self.xmax + threshold) and\n\t\t\ty >= (self.ymin - threshold) and y <= (self.ymax + threshold)):\n\t\t return True\n\t\treturn False", "def isOnLine(self, point):\n if((point < self.start and point < self.end) or (\n point > self.start and point > self.end)):\n return False #point is not between the start and end of self\n \n if(self.getArea(self.start, self.end, point) > c.EPSILON):\n return False #points are not co-linear\n \n return True", "def closed_v(self):\n sa = ShapeAnalysis_Surface(self.surface())\n return sa.IsVClosed()", "def in_box(point, c1, c2):\n c1x, c1y = c1\n c2x, c2y = c2\n x, y = point\n return min(c1x, c2x) <= x <= max(c1x, c2x) and min(c1y, c2y) <= y <= max(c1y, c2y)", "def is_perfect_square():", "def is_confident(csq, num_choices):\n return csq >= CHI_SQUARE_DISTRIBUTION[num_choices - 2]", "def goodPlace(contour):\n perimeter = cv2.arcLength(contour, True)\n x, y, w, h = cv2.boundingRect(contour)\n if y<=0 or x<=0:\n return False\n elif y+h >=2016 or x+w>=3840:\n return False\n return True", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def intersects(*args):\r\n if len(args) == 2:\r\n p0, p1, p2, p3 = *args[0], *args[1]\r\n elif len(args) == 4:\r\n p0, p1, p2, p3 = args\r\n else:\r\n raise AttributeError(\"Pass 2, 2-pnt lines or 4 points to the function\")\r\n #\r\n # ---- First check ---- np.cross(p1-p0, p3-p2 )\r\n p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y = *p0, *p1, *p2, *p3\r\n s10_x = p1_x - p0_x\r\n s10_y = p1_y - p0_y\r\n s32_x = p3_x - p2_x\r\n s32_y = p3_y - p2_y\r\n denom = s10_x * s32_y - s32_x * s10_y\r\n if denom == 0.0:\r\n return False\r\n #\r\n # ---- Second check ---- np.cross(p1-p0, p0-p2 )\r\n den_gt0 = denom > 0\r\n s02_x = p0_x - p2_x\r\n s02_y = p0_y - p2_y\r\n s_numer = s10_x * s02_y - s10_y * s02_x\r\n if (s_numer < 0) == den_gt0:\r\n return False\r\n #\r\n # ---- Third check ---- np.cross(p3-p2, p0-p2)\r\n t_numer = s32_x * s02_y - s32_y * s02_x\r\n if (t_numer < 0) == den_gt0:\r\n return False\r\n #\r\n if ((s_numer > denom) == den_gt0) or ((t_numer > denom) == den_gt0):\r\n return False\r\n #\r\n # ---- check to see if the intersection point is one of the input points\r\n t = t_numer / denom\r\n # 
substitute p0 in the equation\r\n x = p0_x + (t * s10_x)\r\n y = p0_y + (t * s10_y)\r\n # be careful that you are comparing tuples to tuples, lists to lists\r\n if sum([(x, y) == tuple(i) for i in [p0, p1, p2, p3]]) > 0:\r\n return False\r\n return True", "def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:\n if len(points) < 3:\n raise ValueError(\"CurveChecker.assert_collinear() must be called with at least three points\")\n\n thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]\n for t0, t1 in zip(thetas, thetas[1:]):\n if abs(t0 - t1) > tolerance:\n return False\n\n return True", "def is_calibrated(self):\n\n return self.bin_edges_kev is not None", "def can_draw(self,point):\n if point <= 0:\n return False\n else:\n return True", "def _CheckConvergence(self):\n self.is_converged = True\n self.are_converged[0] = (abs(self.delta_e) < self.conv_delta_e)\n self.are_converged[1] = (self.grad_rms < self.conv_grad_rms)\n self.are_converged[2] = (self.grad_max < self.conv_grad_max)\n self.are_converged[3] = (self.disp_rms < self.conv_disp_rms)\n self.are_converged[4] = (self.disp_max < self.conv_disp_max)\n for i in range(5):\n if self.must_converge[i] and not self.are_converged[i]:\n self.is_converged = False", "def is_valid_grasp(point, focus_mask):\n ymid = int(point[0])\n xmid = int(point[1])\n d = cfg.CHECK_RANGE\n\n check_box = focus_mask.data[ymid - d:ymid + d, xmid - d:xmid + d]\n num_nonzero = np.sum(check_box > 0)\n\n fraction_nonzero = (num_nonzero * 1.0)/((2 * d)**2)\n return fraction_nonzero < 0.2", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def isOnCanvas(self, x, y):\n return 0 <= x < self.width and 0 <= y < self.height", "def test_convex_init(self):\n print(\"Convex_Init\")\n finder = dc.dock.ConvexHullPocketFinder()", "def is_vector(self):\n return len(self.coeffs.shape[self.sdim:]) == 1", "def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0", "def check_deterministic_constraints(self, x):\n return True", "def check_deterministic_constraints(self, x):\n return True", "def within_cone(self, cone, n, v):\n if (v.dot(cone) < 0).any(): # v should point in same direction as cone\n v = -v # don't worry about sign, we don't know it anyway...\n f = -n / np.linalg.norm(n)\n alpha = np.arccos(f.T.dot(v) / np.linalg.norm(v))\n return alpha <= np.arctan(self.friction_coef), alpha", "def is_continuous(parameter):\n return sum([isinstance(parameter, p) for p in continuous_params])>0", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def contains_point(self, x, y = None):\n x, y = y is not None and Point(x, y) or Point(x[0], x[1])\n\n cond1 = self.min_x() <= x <= self.max_x()\n cond2 = self.min_y() <= y <= self.max_y()\n return self.is_point_on_same_line(x, y) and cond1 and cond2", "def is_Hypersurface(self):\n return isinstance(self, (ProjectiveHypersurface, AffineHypersurface))" ]
[ "0.7643623", "0.6757536", "0.67146444", "0.65801567", "0.6464188", "0.6454179", "0.635574", "0.62782335", "0.61885536", "0.61603767", "0.6148129", "0.6033672", "0.60245943", "0.590163", "0.580078", "0.5784255", "0.57838607", "0.5783847", "0.5773544", "0.57301515", "0.5673152", "0.565746", "0.56263477", "0.56033194", "0.55996966", "0.5594187", "0.5550376", "0.5549927", "0.54887104", "0.5479333", "0.5455991", "0.5442707", "0.5437503", "0.5426695", "0.5425105", "0.5419447", "0.5390327", "0.53815", "0.5369387", "0.5336056", "0.5333933", "0.5323544", "0.53211665", "0.5301559", "0.52961725", "0.5283968", "0.52068573", "0.519734", "0.51833737", "0.518064", "0.5179767", "0.51772344", "0.51743513", "0.51730365", "0.517004", "0.5169872", "0.5169872", "0.5168115", "0.51384926", "0.5129415", "0.51247454", "0.51220137", "0.5111877", "0.510995", "0.5105578", "0.51019394", "0.510055", "0.5081687", "0.5079676", "0.50773185", "0.5071698", "0.5069468", "0.5063318", "0.50570637", "0.50503594", "0.5048808", "0.5034397", "0.5026661", "0.50250494", "0.5024488", "0.5021849", "0.501581", "0.5006354", "0.49972868", "0.49936348", "0.49842608", "0.49724242", "0.4972042", "0.4968242", "0.49625257", "0.49561659", "0.49477026", "0.49446112", "0.49436563", "0.49436563", "0.49432543", "0.49404553", "0.49359047", "0.49291497", "0.49222782" ]
0.73451805
1
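The record above pairs the convexity query with a one-line wrapper around `cv2.isContourConvex`. As a minimal sketch (not part of the dataset), the snippet below shows that call end to end; the synthetic square image and all variable names are illustrative assumptions, since the dataset's `__IsConvex` is a method on an unspecified class.

```python
# Sketch only: exercising cv2.isContourConvex, the call wrapped by the
# positive document above, on an assumed synthetic shape.
import cv2
import numpy as np

# Draw a filled square on a blank image and extract its outer contour.
img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (80, 80), 255, thickness=-1)
# OpenCV >= 4 API: findContours returns (contours, hierarchy).
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# A square's contour is convex, so this prints True.
print(cv2.isContourConvex(contours[0]))
```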
Calculate a contour perimeter or a curve length.
def __CalculateLength(self, curve): return cv2.arcLength(curve, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __CalculatePerimeter(self, curve):\r\n return cv2.arcLength(curve, True)", "def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)", "def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2", "def perimeter(self):\n return sum([s.length for s in self.segments])", "def perimeter(self):", "def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)", "def perimeter(self):\n\t\treturn 2 * (self.width + self.height)", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def perimeter(self):\n\t\treturn self.height * 4", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def perimeter(self):\n return sum(seg.length for seg in self.segments) + \\\n sum([p.perimeter for p in self.subs])", "def perimeter(self):\n return (\n self.side_1_length +\n self.side_2_length +\n self.side_3_length +\n self.side_4_length\n )", "def get_perimeter_formula(cls):\n pass", "def perimeter(self):\n perimeter = (2 * self.__length) + (2 * self.__width)\n\n return perimeter", "def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)", "def perimeter(self):\r\n return (2*self.width) + (2*self.height)", "def perimeter(self):\n return sum(self._lengths)", "def perimeter(self):\n return sum(self._lengths)", "def perimeter(self):\n return 2 * (self.height + self.width)", "def perimeter(points):\n return sum(get_distances(points))", "def perimeter(a:float, b:float, c:float):\n return a + b + c", "def PolyPerimeter(Coords):\n peri = 0.0\n for i in range(np.shape(Coords)[0]-1):\n # next point coord - current point coord\n peri = peri + ( (Coords[i+1,0] - Coords[i,0])**2 + (Coords[i+1,1] - Coords[i,1])**2 )**0.5\n\n return peri", "def edge_perimeter_length(c, stencil=nn_stencil):\n\n return np.sum(np.logical_not(c) * coordination(c, stencil=stencil))", "def perimeter_distance(self, p1, p2):\n\n p1_projection = self.outline.project(shgeo.Point(p1))\n p2_projection = self.outline.project(shgeo.Point(p2))\n\n distance = p2_projection - p1_projection\n\n if abs(distance) > self.outline_length / 2.0:\n # if we'd have to go more than halfway around, it's faster to go\n # the other way\n if distance < 0:\n return distance + self.outline_length\n elif distance > 0:\n return distance - self.outline_length\n else:\n # this ought not happen, but just for completeness, return 0 if\n # p1 and p0 are the same point\n return 0\n else:\n return distance", "def get_corrected_arclength(pts,closed=False):\r\n \r\n l = len(pts)\r\n ptsDown2 = np.concatenate((pts[2:l],pts[0:2]))\r\n ptsDown1 = np.concatenate((pts[1:l],np.array([(pts[0][0],pts[0][1])])))\r\n ptsUp1 = np.concatenate((np.array([(pts[l-1][0],pts[l-1][1])]),pts[0:l-1]))\r\n ptsUp2 = np.concatenate((pts[l-2:l],pts[0:l-2]))\r\n summedPts = ptsDown2 + ptsDown1 + pts + pts + ptsUp1 + ptsUp2\r\n avePts = summedPts/5.0\r\n zoomAvePts = np.round(avePts)\r\n arcLength = cv2.arcLength(zoomAvePts.astype(int),closed)\r\n \r\n return arcLength", "def regular_polygon_area(perimeter, apothem):\n return (perimeter * apothem) / 2", "def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)", "def get_rect_perimeter(length, width):\n length = (str)(length)\n width = (str)(width)\n if((length.isnumeric()) and (length.isnumeric())):\n length = 
(float)(length)\n width = (float)(width)\n perimeter = 2 * (length + width)\n else:\n perimeter = \"Invalid input, length and width must be numeric value\"\n return perimeter", "def circumference(self):\n raise NotImplementedError", "def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width", "def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))", "def perimeter(self) -> ir.FloatingValue:\n return ops.GeoPerimeter(self).to_expr()", "def area_of_circle(radius):\n return radius", "def test_triangle_get_perimeter(self):\n triangle = Triangle(0, 9, 10, 11)\n self.assertEqual(triangle.get_perimeter(), 30)", "def get_perimeter_formula(cls):\n dict_perimieter = {'circle':\"2πr\", 'square':'2a+2b', 'rectangle':'2a+2b',\n 'triangle':'3a',\"equilateral triangle\":'a+b+c',\n 'regular pentagon':\"5a\"}\n for k,v in dict_perimieter.items():\n if cls.__name__ == k:\n return v", "def lengthPerArea(I):\n perim = skimage.measure.perimeter(I.astype(int), 8)\n LA = perim / np.sum(I)\n print(\"LA (true count): {:.2%}\".format(LA))\n\n # lines probes, every 40 pixels\n probe = np.zeros(I.shape)\n probe[20:-20:40, 20:-20] = 1\n lines = I.astype(int) * probe\n\n # count number of intercepts\n h = np.array([[1, -1, 0]])\n points = scipy.signal.convolve2d(lines, h, mode='same')\n\n nb_lines = np.sum(lines)\n nb_points = np.sum(np.abs(points))\n PL = float(nb_points) / nb_lines\n print(\"pi/2*PL (evaluation): {:.2%}\".format(np.pi/2*PL))", "def calc_area(diameter):\n\n if diameter > 0:\n area = pi * (diameter/2) ** 2\n \n return area", "def area_circle(r):\n return (r ** 2) * math.pi", "def equivalentDiameter(cnt):\n\treturn np.sqrt(4 * (cv2.contourArea(cnt)) / np.pi)", "def circleArea(radius):\n return math.pi * radius * radius", "def circle_area(circle):\n return pi * circle.radius * circle.radius", "def get_diameter(self, method='volume'):\n\n if method == 'shape':\n pos = self.get_positions() - self.center\n d = 0.0\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(pos, n)\n d += r.max() - r.min()\n return d / len(self.surfaces)\n elif method == 'volume':\n V_cell = np.abs(np.linalg.det(self.lattice_basis))\n N_cell = len(self.atomic_basis)\n N = len(self)\n return 2.0 * (3.0 * N * V_cell / (4.0 * math.pi * N_cell)) ** (1.0/3.0)\n else:\n return 0.0", "def __CalculateArea(self, contour):\r\n return cv2.contourArea(contour)", "def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)", "def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)", "def area(r):\n return np.pi * (r ** 2)", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def length(self):\n return math.sqrt(self.x * self.x + self.y * self.y)", "def calculateDetectorArea(self):\n area = 0.0\n r = self.geoParam['CylinderLightGuideRadius']\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n area -= math.pow(r,2)\n r += self.geoParam['DetectorThickness']\n area += math.pow(r,2)\n r += self.geoParam['DetectorSpacing']\n return math.pi*area", "def circle_area(radius):\n return math.pi * radius ** 2", "def get_perimeter(self, radius: int = 1) -> set:\n return 
self.get_neighbourhood(radius) - self.get_neighbourhood(radius - 1)", "def calculate_perimeter_ratio(gt_perimeter, perf_perimeter):\n return min(gt_perimeter, perf_perimeter) / max(gt_perimeter, perf_perimeter)", "def calculate_perimeter_diff(gt_perim, perf_perim):\n return abs(gt_perim - perf_perim) / gt_perim", "def circumference(self):\n return self.width + self.height", "def _causal_measure(self, x, y):\r\n\t\tC_xy = self._cross_cumulant_4th(x, y)\r\n\t\tC_yx = self._cross_cumulant_4th(y, x)\r\n\t\tR = C_xy**2 - C_yx**2\r\n\t\treturn R", "def get_interaction_length(self):\n return self.radius + 2.0 #in um", "def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)", "def island_perimeter(grid):\n c = 0\n length = len(grid) - 1\n width = len(grid[0]) - 1\n\n for i, r in enumerate(grid):\n for j, n in enumerate(r):\n if n == 1:\n if i == 0 or grid[i - 1][j] != 1:\n c += 1\n if j == 0 or grid[i][j - 1] != 1:\n c += 1\n if j == width or grid[i][j + 1] != 1:\n c += 1\n if i == length or grid[i + 1][j] != 1:\n c += 1\n return c", "def cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area", "def length(x):\n if ispoint(x):\n # return pointlength(x):\n return 0.0\n elif isline(x):\n return linelength(x)\n elif isarc(x):\n return arclength(x)\n elif ispoly(x):\n return polylength(x)\n elif isgeomlist(x):\n l = 0.0\n for g in x:\n l += length(g)\n return l\n else:\n raise ValueError(\"inappropriate type for length(): \".format(x))", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def circumference(self):\n return math.pi * self.radius * 2", "def perimeter_points(d,n,type = 'int'):\n rimpointsx = np.sin(np.linspace(0,2*np.pi,num=n,endpoint = False)) + 1\n rimpointsy = np.cos(np.linspace(0,2*np.pi,num=n,endpoint = False)) + 1\n rimpoints = (((d-1)/2))*np.array([rimpointsy,rimpointsx])\n if type == 'int':\n rimpoints = np.round(rimpoints)\n rimpoints = rimpoints.astype(int)\n return rimpoints", "def area(self):\n semi_perimeter = self.perimeter() / 2\n area = semi_perimeter\n for l in self._lengths:\n area *= (semi_perimeter - l)\n return float('{:.2f}'.format(area**0.5))", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def getArea(self):\n return math.pi * self.radius ** 2", "def get_pupil_diameter(dlc):\r\n diameters = []\r\n # Get the x,y coordinates of the four pupil points\r\n top, bottom, left, right = [np.vstack((dlc[f'pupil_{point}_r_x'], dlc[f'pupil_{point}_r_y']))\r\n for point in ['top', 'bottom', 'left', 'right']]\r\n # First compute direct diameters\r\n diameters.append(np.linalg.norm(top - bottom, axis=0))\r\n diameters.append(np.linalg.norm(left - right, axis=0))\r\n\r\n # For non-crossing edges, estimate diameter via circle assumption\r\n for pair in [(top, left), (top, right), (bottom, left), (bottom, right)]:\r\n diameters.append(np.linalg.norm(pair[0] - pair[1], axis=0) * 2 ** 0.5)\r\n\r\n # Ignore all nan runtime warning\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\r\n return 
np.nanmedian(diameters, axis=0)", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def island_perimeter(grid):\n \"\"\"island_perimeter - perimeter of the island\n Parameter\n ---------\n grid:\n list\n Return\n ------\n int\n \"\"\"\n total = 0\n\n rows = len(grid)\n columns = len(grid[0])\n\n for row in range(rows):\n for col in range(columns):\n array = grid[row][col]\n if array == 1:\n total += 4\n if row != 0 and grid[row-1][col] == 1:\n total -= 1\n if col != 0 and grid[row][col-1] == 1:\n total -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n total -= 1\n if col + 1 != columns and grid[row][col + 1] == 1:\n total -= 1\n\n return total", "def Lengths(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.element_type == \"line\":\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n else:\n # self.GetEdges()\n # coords = self.points[self.all_edges,:]\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n\n return lengths", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def circle_area(radius):\n area = radius ** 2 * math.pi\n return area", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area", "def diameter(self):\n return 2 * self.radius", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def circle_area(r):\n if r < 0:\n raise ValueError(\"Radius cannot be negative\")\n\n return pi*(r**2)", "def get_dimensions_from_contour(img, cntr, kernel):\n\tmask = np.zeros_like(img) # mask will contain the fitted and adjusted ellipse of a single obstacle\n\tellipse = cv2.fitEllipse(cntr)\n\tx, y, obj_length, obj_height = cv2.boundingRect(cntr)\n\trect = cv2.minAreaRect(cntr)\n\n\tequi_diameter = obj_length # bounding rectangle gives a better approximation of diameter\n\n\tbox = cv2.boxPoints(rect)\n\tbox = np.int0(box)\n\tmask = cv2.ellipse(mask, ellipse, (255, 255, 255), -1) # draw the fitted ellipse\n\trows = mask.shape[0]\n\tcols = mask.shape[1]\n\tM = np.float32([[1, 0, 0], [0, 1, equi_diameter / 4]]) # shift mask down to match obstacle, not edge\n\tmask = cv2.warpAffine(mask, M, (cols, rows))\n\tmask = cv2.erode(mask, kernel, iterations=3) # erode the mask to remove background points\n\treturn mask, box, x, y, obj_length, obj_height", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def calc_half_perimeter(self, source, sinks):\n deltax = 0\n deltay = 0\n assert self.cells[source].x in range(self.nx) and self.cells[source].y in range(self.ny)\n for sink in sinks:\n assert self.cells[sink].x in range(self.nx) and self.cells[sink].y in range(self.ny)\n dx = abs(self.cells[source].x - self.cells[sink].x)\n if dx > deltax:\n deltax = dx\n dy = abs(self.cells[source].y - self.cells[sink].y)\n if dy > deltay:\n deltay = dy\n return deltax + deltay", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def get_length_sqrd(self):\n return self.x**2 + 
self.y**2", "def calculate_curvature(P):\n y = P[:,1].copy()\n x = P[:,0].copy()\n dx = np.gradient(x)\n yd = np.gradient(y, dx)\n ydd = np.gradient(yd, dx)\n return np.sum(ydd**2)", "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def calc_length_distortion(self, x, y):\n\n # get the major axis of the used Earth ellipsoid\n ellaxis = Geodesic.WGS84.a\n\n # get the centre of the subgrid's projection\n fe = self.core.projection.osr_spref.GetProjParm('false_easting')\n fn = self.core.projection.osr_spref.GetProjParm('false_northing')\n\n # create the distances to the projection centre\n dists = np.sqrt((np.array(x) - fe)**2 + (np.array(y) - fn)**2)\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n #\n # is it just about the distance to the centre (c), and as are equally long\n # on the ellipsoid and on the projected plane (the core of of AEQD!)\n k = dists / ellaxis / np.sin(dists / ellaxis)\n\n return k", "def getDiameter(self):\n\n hdr = self.header\n if \"cd1_1\" in hdr:\n self.D = abs(hdr[\"cd1_1\"]) * hdr[\"naxis1\"]\n elif \"cdelt1\" in hdr:\n self.D = abs(hdr[\"cdelt1\"]) * hdr[\"naxis1\"]\n else:\n print(\"Warning: no coordinate information found in input header;\")\n print(\" pupil width assumed to be 6.5 meters\")\n self.D = 6.5", "def computeA(diameter):\n radius = diameter / 2.0\n return np.pi * (radius**2)", "def area(cnt):\n\treturn cv2.contourArea(cnt)", "def area(self):\n return self.radius*self.radius*math.pi", "def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)", "def compute_thickness(self):\n com = vtk.vtkCenterOfMass()\n com.SetInputData(self.inner_rim_poly)\n center = np.asarray(com.GetCenter()) # take center from inner points (not outer)\n\n irp_numpy = numpy_support.vtk_to_numpy(self.inner_rim_poly.GetPoints().GetData())\n orp_numpy = numpy_support.vtk_to_numpy(self.outer_rim_poly.GetPoints().GetData())\n\n # compute average radius ..\n rs_inner = np.linalg.norm(irp_numpy - np.tile(center, (irp_numpy.shape[0], 1)), axis = 1)\n rs_outer = np.linalg.norm(orp_numpy - np.tile(center, (orp_numpy.shape[0], 1)), axis = 1)\n\n # average out\n r_inner = np.mean(rs_inner)\n r_outer = np.mean(rs_outer)\n\n # compute distance\n d = r_outer - r_inner\n self.thickness = d\n\n return d", "def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)", "def area(self):\n return self.length*self.length" ]
[ "0.7951354", "0.72823113", "0.69471735", "0.68977535", "0.6630319", "0.65940684", "0.65453714", "0.65153366", "0.6505985", "0.64768934", "0.6449339", "0.6431239", "0.6404429", "0.63940036", "0.6379212", "0.6343764", "0.6341372", "0.6341372", "0.63238937", "0.6322581", "0.622877", "0.60354203", "0.6013534", "0.59701145", "0.58330923", "0.57935125", "0.5785374", "0.57831395", "0.5719135", "0.5704512", "0.56844455", "0.56768113", "0.56677145", "0.561239", "0.56097686", "0.56079733", "0.5569449", "0.5567249", "0.55187273", "0.55014026", "0.54781795", "0.54734296", "0.5462582", "0.5446707", "0.5432392", "0.54239553", "0.5418415", "0.5401736", "0.53998476", "0.537132", "0.5363943", "0.53533345", "0.534669", "0.53014123", "0.530025", "0.53001666", "0.529984", "0.5292733", "0.52864105", "0.52851367", "0.52849346", "0.52849346", "0.52808803", "0.527688", "0.527319", "0.52651393", "0.526471", "0.52587813", "0.5257371", "0.52541536", "0.52541536", "0.5253033", "0.5245788", "0.52428776", "0.52428776", "0.5239803", "0.5236999", "0.5234423", "0.52285755", "0.52285063", "0.5221778", "0.5212223", "0.5211332", "0.52090275", "0.5208559", "0.519778", "0.51897657", "0.518909", "0.5187427", "0.5184251", "0.51599014", "0.5159682", "0.51582503", "0.51579803", "0.51554585", "0.51540995", "0.51523566", "0.5145641", "0.5142069", "0.514043" ]
0.66685665
4
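Again as an illustrative sketch rather than dataset content: the positive document above reduces to `cv2.arcLength(curve, True)`, which measures a closed contour's perimeter. Applied to the same assumed synthetic square:

```python
# Sketch only: cv2.arcLength with closed=True returns the perimeter of a
# closed curve; the square image below is an assumption for illustration.
import cv2
import numpy as np

img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (80, 80), 255, thickness=-1)
# OpenCV >= 4 API: findContours returns (contours, hierarchy).
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# A 60x60 square yields a perimeter of roughly 240 pixels.
print(cv2.arcLength(contours[0], True))
```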
Calculate the contour moments to help you to calculate some features like center of mass of the object, area of the object etc.
def __CalculateMoments(self, contour): return cv2.moments(contour)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moments(cnt):\n\treturn cv2.moments(cnt)", "def moments(cnt):\n\treturn cv2.moments(cnt)", "def moments(self):", "def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)", "def measure_image_moments(image):\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)", "def get_image_moments(image=None, contour=None, threshold=3):\n\tif contour is None and image is not None:\n\t\tcontour = get_contour(image, threshold)\n\treturn cv2.moments(contour)", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = int((X*data).sum()/total)\n y = int((Y*data).sum()/total)\n col = data[:, int(y)]\n \n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n \n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return(height, x, y, width_x, width_y, 0.0)\n #return(1, 15, 15, 2, 2, 0.0)", "def center_of_contour(contorno):\n M = cv2.moments(contorno)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"]!=0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (int(cX), int(cY))\n else:\n return (200,150)", "def find_center( contours ):\r\n ret = []\r\n\r\n for x in contours:\r\n M = cv2.moments( x )\r\n pt = Point()\r\n pt.x = int( M['m10']/M['m00'] )\r\n pt.y = int( M['m01']/M['m00'] )\r\n\r\n ret.append( pt )\r\n\r\n return( ret );", "def moments(self, data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return x, y, width_x, width_y, height", "def moments(data):\n height = data.max()\n background = data.min()\n data = data - np.min(data)\n total = data.sum()\n x, y = np.indices(data.shape)\n x = (x * data).sum() / total\n y = (y * data).sum() / total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())\n width_x /= gaussian_sigma_to_fwhm\n width_y /= gaussian_sigma_to_fwhm\n return {\n \"amplitude\": height,\n \"x\": x,\n \"y\": y,\n \"sigma_x\": width_x,\n \"sigma_y\": width_y,\n \"background\": background,\n \"theta\": 0.0,\n }", "def __CalculateCentroid(self, 
contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid", "def find2D_higher_moments(image, centroid, halfwidths, c_sum):\n \n # Unpack centroid to seperate values\n xcen, ycen = np.floor(centroid)\n xhw, yhw = halfwidths\n \n xmoment2 = 0\n xmoment3 = 0\n ymoment2 = 0\n ymoment3 = 0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n y_range = np.array((np.floor(ycen - yhw) - 1, np.ceil(ycen + yhw) - 1))\n \n \n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n for jj in xrange(np.int(y_range[0]), np.int(y_range[1])):\n \n xloc = ii - np.floor(xcen)\n yloc = jj - np.floor(ycen)\n \n xweight = 0\n yweight = 0\n \n xoff = np.abs(ii - xcen)\n yoff = np.abs(jj - ycen)\n \n if xoff <= xhw:\n xweight = 1\n elif xhw < xoff < (xhw + 1):\n xweight = xhw + 1 - xoff\n \n if yoff <= yhw:\n yweight = 1\n elif yhw < yoff < (yhw + 1):\n yweight = yhw + 1 - yoff\n \n weight = xweight * yweight\n\n xmoment2 += xloc ** 2 * image[jj, ii] * weight\n xmoment3 += xloc ** 3 * image[jj, ii] * weight\n ymoment2 += yloc ** 2 * image[jj, ii] * weight\n ymoment3 += yloc ** 3 * image[jj, ii] * weight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n ymoment2 = ymoment2 / c_sum\n ymoment3 = ymoment3 / c_sum\n\n # Pack the x and y moments to return to main program\n x_moment = np.array((xmoment2, xmoment3))\n y_moment = np.array((ymoment2, ymoment3))\n \n return x_moment, y_moment", "def find1D_higher_moments(image, xcen, xhw, c_sum):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n xmoment2 = 0.0\n xmoment3 = 0.0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n\n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n xloc = (ii + 1) - np.floor(xcen)\n \n xweight = 0\n xoff = np.abs(ii - xcen)\n \n if xoff <= xhw:\n xweight = 0\n elif xhw < xoff < xhw + 1:\n xweight = xhw + 1 - xoff\n\n xmoment2 += xloc ** 2 * vector[ii] * xweight\n xmoment3 += xloc ** 3 * vector[ii] * xweight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n \n # Pack moments for return to main program\n x_mom = np.array((xmoment2, xmoment3))\n \n return x_mom", "def fun_contours(self, params):\n shape_coeffs = params[:self.num_shape_params]\n blendshape_end = self.num_shape_params + self.numObservations * self.num_blendshape_params\n blendshape_coeffs = params[self.num_shape_params:blendshape_end].reshape((self.numObservations, self.num_blendshape_params))\n trans_mats = params[blendshape_end:].reshape((self.numObservations, 7))\n\n vertices3d = self.vertices3d\n vertices3d_from_mesh = np.zeros_like(vertices3d)\n vertices3d_inner, vertices3d_right, vertices3d_left = self.transform_meshes(shape_coeffs, blendshape_coeffs, trans_mats)\n\n inner_idx = 0\n for idx in range(vertices3d.shape[0]):\n lm_idx = idx % 66\n obs_num = int(np.floor(idx/66))\n\n if lm_idx in self.contour_lms_list[0]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_right[obs_num])\n elif lm_idx in self.contour_lms_list[1]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_left[obs_num])\n else:\n vertices3d_from_mesh[idx] = vertices3d_inner[obs_num][inner_idx]\n inner_idx += 1\n if inner_idx == 50:\n 
inner_idx = 0\n\n return (vertices3d_from_mesh - vertices3d).ravel()", "def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)", "def contours(info, color, line, mean_marker):\n\teigenval, eigenvec = np.linalg.eigh(info['covar'])\n\n\taxis11, axis12 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 1)\n\taxis21, axis22 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 2)\n\taxis31, axis32 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 3)\n\tangle = axis12['xangle']\t\n\tangle = angle * 180 / math.pi\n\n\tellipse1 = Ellipse(xy=info['mean'], width=axis11['length'], height=axis12['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\tellipse2 = Ellipse(xy=info['mean'], width=axis21['length'], height=axis22['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\tellipse3 = Ellipse(xy=info['mean'], width=axis31['length'], height=axis32['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\n\tax = plt.gca()\n\tax.add_patch(ellipse3)\n\tax.add_patch(ellipse2)\n\tax.add_patch(ellipse1)\n\tax.set_xlim(-0.4, 0.4)\n\tax.set_ylim(0.5, 2.0)\n\tplt.plot(info['mean'][0], info['mean'][1], marker=mean_marker, mfc='none', mec=color, markersize=8, mew=2)\n\tsigma1 = {'ax1':axis11['length'], 'ax2':axis12['length'], 'xangle1':axis11['xangle'], 'xangle2':axis12['xangle']}\n\tsigma2= {'ax1':axis21['length'], 'ax2':axis22['length'], 'xangle1':axis21['xangle'], 'xangle2':axis22['xangle']}\n\tsigma3 = {'ax1':axis31['length'], 'ax2':axis32['length'], 'xangle1':axis31['xangle'], 'xangle2':axis32['xangle']}\n\n\treturn sigma1, sigma2, sigma3", "def extractFeatures(bwimage):\n \n \n # circularity\n img = bwimage.copy()\n img1, contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n \n if len(contours)==0:\n return []\n B = contours[0]\n C = B[:,0,0]\n l = C.size\n \n \n if abs(B[0,0,0] - B[l-1,0,0]) + abs(B[0,0,1] - B[l-1,0,1]) == 2:\n P8 = math.sqrt(2)\n else:\n P8 = 1 \n for j in range(0,l-1): \n if abs((B[j+1,0,0] - B[j,0,0])) + abs(B[j+1,0,1] - B[j,0,1]) == 2:\n P8 = P8 + math.sqrt(2)\n else:\n P8 = P8 + 1\n \n n = np.count_nonzero(bwimage)\n \n circularity = P8*P8/n\n \n \n # elongation\n idx = np.nonzero(bwimage);\n c = idx[1]\n r = idx[0]\n meanx = np.mean(c)\n meany = np.mean(r)\n \n \n pows = 2*np.ones(n)\n \n sigxx = np.sum(np.power((c-meanx),pows))/n\n sigyy = np.sum(np.power((r-meany),pows))/n\n sigxy = np.sum(np.multiply((r-meany),(c-meanx)))/n\n \n covMat = np.array([[sigxx, sigxy], [sigxy, sigyy]])\n val, vects = np.linalg.eig(covMat);\n \n maxEigenValue = np.amax(val) \n minEigenValue = np.amin(val.ravel()[np.flatnonzero(val)])\n \n \n elongation = math.sqrt(maxEigenValue/minEigenValue);\n \n \n # principal axis\n maxidx = np.argmax(val)\n principalAxisVector = vects[maxidx]\n \n \n return [circularity, elongation, principalAxisVector]", "def contour():\n # 'mayavi' is always defined on the interpreter.\n # Create a new scene.\n mayavi.new_scene()\n\n # Read a VTK (old style) data file.\n r = VTKFileReader()\n #filename = join(mayavi2.get_data_dir(dirname(abspath(__file__))),\n #'heart.vtk')\n filename = 'heart.vtk'\n r.initialize(filename)\n mayavi.add_source(r)\n\n # Create an outline for the data.\n o = Outline()\n mayavi.add_module(o)\n\n # Create three simple grid plane modules.\n # First normal to 'x' axis.\n gp 
= GridPlane()\n mayavi.add_module(gp)\n # Second normal to 'y' axis.\n gp = GridPlane()\n mayavi.add_module(gp)\n gp.grid_plane.axis = 'y'\n # Third normal to 'z' axis.\n gp = GridPlane()\n mayavi.add_module(gp)\n gp.grid_plane.axis = 'z'\n\n # Create one ContourGridPlane normal to the 'x' axis.\n cgp = ContourGridPlane()\n mayavi.add_module(cgp)\n # Set the position to the middle of the data.\n cgp.grid_plane.position = 15\n\n # Another with filled contours normal to 'y' axis.\n cgp = ContourGridPlane()\n mayavi.add_module(cgp)\n # Set the axis and position to the middle of the data.\n cgp.grid_plane.axis = 'y'\n cgp.grid_plane.position = 15\n cgp.contour.filled_contours = True\n\n # An isosurface module.\n iso = IsoSurface(compute_normals=True)\n mayavi.add_module(iso)\n iso.contour.contours = [220.0]\n\n # An interactive scalar cut plane.\n cp = ScalarCutPlane()\n mayavi.add_module(cp)\n cp.implicit_plane.normal = 0,0,1", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-x)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-y)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def moments(self,connected=False,dimensionless=False):\n\n\t\t#First check that the instance has the gradient and hessian attributes; if not, compute them\n\t\tif not (hasattr(self,\"gradient_x\") and hasattr(self,\"gradient_y\")):\n\t\t\tself.gradient()\n\n\t\tif not (hasattr(self,\"hessian_xx\") and hasattr(self,\"hessian_yy\") and hasattr(self,\"hessian_xy\")):\n\t\t\tself.hessian()\n\n\t\t#Decide if using the full map or only the unmasked region\n\t\tif self._masked:\n\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\tdata = self.data[self._full_mask]\n\t\t\tgradient_x = self.gradient_x[self._full_mask]\n\t\t\tgradient_y = self.gradient_y[self._full_mask]\n\t\t\thessian_xx = self.hessian_xx[self._full_mask]\n\t\t\thessian_yy = self.hessian_yy[self._full_mask]\n\t\t\thessian_xy = self.hessian_xy[self._full_mask]\n\n\t\telse:\n\n\t\t\tdata = self.data\n\t\t\tgradient_x = self.gradient_x\n\t\t\tgradient_y = self.gradient_y\n\t\t\thessian_xx = self.hessian_xx\n\t\t\thessian_yy = self.hessian_yy\n\t\t\thessian_xy = self.hessian_xy\n\n\t\t\n\t\t#Quadratic moments\n\t\tsigma0 = data.std()\n\t\tsigma1 = np.sqrt((gradient_x**2 + gradient_y**2).mean())\n\n\t\t#Cubic moments\n\t\tS0 = (data**3).mean()\n\t\tS1 = ((data**2)*(hessian_xx + hessian_yy)).mean()\n\t\tS2 = ((gradient_x**2 + gradient_y**2)*(hessian_xx + hessian_yy)).mean()\n\n\t\t#Quartic moments\n\t\tK0 = (data**4).mean()\n\t\tK1 = ((data**3) * (hessian_xx + hessian_yy)).mean()\n\t\tK2 = ((data) * (gradient_x**2 + gradient_y**2) * (hessian_xx + hessian_yy)).mean()\n\t\tK3 = ((gradient_x**2 + gradient_y**2)**2).mean()\n\n\t\t#Compute connected moments (only quartic affected)\n\t\tif connected:\n\t\t\tK0 -= 3 * sigma0**4\n\t\t\tK1 += 3 * sigma0**2 * sigma1**2\n\t\t\tK2 += sigma1**4\n\t\t\tK3 -= 2 * sigma1**4\n\n\t\t\n\t\t#Normalize moments to make them dimensionless\n\t\tif dimensionless:\n\t\t\tS0 /= sigma0**3\n\t\t\tS1 /= (sigma0 * sigma1**2)\n\t\t\tS2 *= (sigma0 / sigma1**4)\n\n\t\t\tK0 /= sigma0**4\n\t\t\tK1 /= (sigma0**2 * sigma1**2)\n\t\t\tK2 /= sigma1**4\n\t\t\tK3 /= sigma1**4\n\n\t\t\tsigma0 /= sigma0\n\t\t\tsigma1 /= sigma1\n\n\t\t#Return the array\n\t\treturn 
np.array([sigma0,sigma1,S0,S1,S2,K0,K1,K2,K3])", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def moments(data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y", "def moments(data):\n total = data.sum()\n if total != 0.:\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n else:\n height=0\n x=0\n y=0\n width_x=0\n width_y=0\n return height,np.sqrt(width_x**2 + width_y**2)", "def visualizeObs():\n fcontourf(fObs, [-2, 2], [-1, 1], [0, 10])", "def calculateCentroid(self,image):\n\tim=cv2.imread(image,0) #reads it in greyscale\n\tret,thresh = cv2.threshold(im,128,255,cv2.THRESH_OTSU)\n\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\tcnt = contours[0]\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\tcentroid=(cx,cy)\n\treturn centroid", "def calc_moments(field, lats, lons, xypoints, hemisphere='NH', field_type='GPH', \\\n edge=3.02e4, resolution='full'):\n print('Calculating for resolution: '+resolution)\n field_cart, x, y = sph_to_car(field,lons,lats,xypoints,resolution)\n field_vtx = isolate_vortex(field_cart, edge, field_type)\n \n aspect_ratio, latcent = moment_integrate(field_vtx, x, y,edge)\n \n return {'aspect_ratio':aspect_ratio, 'centroid_latitude':latcent}", "def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls 
= [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. \r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic", "def get_centroid(image, method=\"propio\"):\n # ---------Método Propio (directo de la definición)----------\n if method == \"propio\":\n # Dimensiones\n height, width = image.shape[:2]\n # Masa total\n total_mass = image.sum()\n\n # Si la masa total es cero, entonces el centro de masa \n # no existe\n if total_mass == 0:\n r = np.array([-1, -1])\n return r, None\n\n # Primera componente (suma por filas)\n row_sum = image.sum(axis=1)\n row_weight = np.arange(1, height+1)\n r_i = np.dot(row_sum, row_weight)\n r_i /= total_mass\n r_i = int(r_i)\n \n # Segunda componente (suma por columnas)\n column_sum = image.sum(axis=0)\n column_weight = np.arange(1, width+1)\n r_j = np.dot(column_sum, column_weight)\n r_j /= total_mass\n r_j = int(r_j)\n\n # Retorna el centroide en coordenadas de imagen\n r = np.array([r_j, r_i])\n return r, None\n\n # ---------Método con contornos-----------------\n else:\n # Obtener contorno imagen binaria (máscara)\n cnts = get_contours(image)\n \n # 
For each contour, get the centroid and append it to the list\n r = []\n for c in cnts:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n r.append(np.array([cX, cY]))\n\n # Now a list of centroids is returned (depending on the \n # number of contours that were found)\n if len(r) == 0:\n r.append(np.array([-1, -1]))\n return r, cnts\n else:\n return r, cnts", "def get_contour_centroid(contour):\n M = cv2.moments(contour)\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n return (cx, cy)", "def moments(data,x0=None,y0=None):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n if x0 is None:\n return height, x, y, width_x, width_y, 0.0, 0.0\n else:\n xstep = x0[1] - x0[0]\n ystep = y0[1] - y0[0]\n return height, x*xstep+x0[0], y*ystep+y0[0], width_x*xstep, width_y*ystep, 0.0, 0.0", "def moments_display(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=None,y=None,z=None,regular=False,noise=False,exptime=100,mag=16.,sigma=4.):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n psf = rebin(hdui.data[i][4:].reshape(npix,npix),(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=img,sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n datam = data.copy()\n data = subMeanAll(data) # remove the mean of all moments except M20\n pl.figure(figsize=(11,11))\n pl.subplot(2,2,1)\n phi22 = 0.5*np.arctan2(data[:,3].imag,data[:,3].real)\n x = data[:,0].real\n y = data[:,1].real\n #phi22[x<0] = phi22+np.deg2rad(180)\n u = np.abs(data[:,3])*np.cos(phi22)\n v = np.abs(data[:,3])*np.sin(phi22)\n qvr = pl.quiver(x,y,u,v,width = 0.004, color='r',pivot='middle',headwidth=0.,headlength=0.,headaxislength=0.,scale_units='width')\n qk = pl.quiverkey(qvr, -150,-240,np.max(np.sqrt(u**2+v**2)),str(round(np.max(np.sqrt(u**2+v**2)),3))+' pix^2',coordinates='data',color='blue')\n pl.plot(x,y,'b,')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.grid(color='g')\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('M22')\n pl.subplot(2,2,2)\n phi31 = np.arctan2(data[:,4].imag,data[:,4].real)\n u = np.abs(data[:,4])*np.cos(phi31)\n v = np.abs(data[:,4])*np.sin(phi31)\n qvr=pl.quiver(x,y,u,v,width=0.003,color='r',pivot='middle',headwidth=4)\n qk = pl.quiverkey(qvr, -150,-240,np.max(np.sqrt(u**2+v**2)),str(round(np.max(np.sqrt(u**2+v**2)),3))+' pix^3',coordinates='data',color='blue')\n 
pl.plot(x,y,'b,')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.grid(color='g')\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('M31')\n pl.subplot(2,2,3)\n phi33 = np.arctan2(data[:,5].imag,data[:,5].real)/3.\n u = np.abs(data[:,5])*np.cos(phi33)\n v = np.abs(data[:,5])*np.sin(phi33)\n pl.quiver(x,y,u,v,width=0.003,color='r',headwidth=4)\n u = np.abs(data[:,5])*np.cos(phi33+np.deg2rad(120))\n v = np.abs(data[:,5])*np.sin(phi33+np.deg2rad(120))\n pl.quiver(x,y,u,v,width=0.003,color='r',headwidth=4)\n u = np.abs(data[:,5])*np.cos(phi33+np.deg2rad(240))\n v = np.abs(data[:,5])*np.sin(phi33+np.deg2rad(240))\n qvr=pl.quiver(x,y,u,v,width=0.003,color='r',headwidth=4)\n qk = pl.quiverkey(qvr, -150,-240,np.max(np.sqrt(u**2+v**2)),str(round(np.max(np.sqrt(u**2+v**2)),3))+' pix^3',coordinates='data',color='blue')\n pl.plot(x,y,'b,')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.grid(color='g')\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('M33')\n pl.subplot(2,2,4)\n m20sqr = np.sqrt(data[:,2].real)\n x = data[:,0].real\n y = data[:,1].real\n m20sqr_med = np.median(m20sqr)\n m20sqr_diff = m20sqr - m20sqr_med\n m20sqr_diff_absmed = np.median(np.abs(m20sqr_diff))\n plotScale = 1./m20sqr_diff_absmed*100\n pos = m20sqr_diff >=0\n neg = m20sqr_diff < 0\n pl.scatter(x[pos],y[pos],s=m20sqr_diff[pos]*plotScale,c='r',alpha=0.5)\n pl.scatter(x[neg],y[neg],s=-m20sqr_diff[neg]*plotScale,c='b',alpha=0.5)\n pl.scatter(-230,-210,s=m20sqr_diff_absmed*plotScale,c='b',alpha=0.5)\n pl.text(-200,-215,'-'+str(round(m20sqr_diff_absmed,6))+' pix')\n pl.scatter(-230,-230,s=m20sqr_diff_absmed*plotScale,c='r',alpha=0.5)\n pl.text(-200,-235,str(round(m20sqr_diff_absmed,6))+' pix')\n pl.plot(x,y,'y,')\n pl.grid(color='g')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('median '+r'$\\sqrt{M20}$: '+str(round(scale*4*m20sqr_med,3))+' [arcsec]')\n return datam", "def get_centroid(moments):\n if moments['m00'] > 0:\n centroid_x = moments['m10']/moments['m00']\n centroid_y = moments['m01']/moments['m00']\n else:\n centroid_x = 0.0\n centroid_y = 0.0\n return centroid_x, centroid_y", "def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)", "def center_of_mass(mask):\n M = cv2.moments(mask)\n # Using the centroid expression defined at: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return [int(cX), int(cY)]", "def init():\n sigma = np.random.uniform(0, 1, 4)\n Z1 = mlab.bivariate_normal(X, Y, sigma[0], sigma[1], 0.0, 0.0)\n Z2 = mlab.bivariate_normal(X, Y, sigma[2], sigma[3], 1, 1)\n Z = (Z1 - Z2) * 10\n\n norm = cm.colors.Normalize(vmax=abs(Z).max(), vmin=-abs(Z).max())\n cmap = cm.PRGn\n\n contf = plt.contourf(X, Y, Z, levels,\n cmap=cm.get_cmap(cmap, len(levels) - 1),\n norm=norm)\n\n return contf", "def center_directions(contours, image: ndarray):\n return contours_.contour_average_center(contours)", "def update_contour():\n global contour_center\n global contour_area\n\n image = rc.camera.get_color_image()\n\n if image is None:\n contour_center = None\n contour_area = 0\n else:\n # Find all of the orange contours\n contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])\n\n # Select the largest contour\n contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)\n\n if contour is not None:\n # Calculate contour information\n 
contour_center = rc_utils.get_contour_center(contour)\n contour_area = rc_utils.get_contour_area(contour)\n\n # Draw contour onto the image\n rc_utils.draw_contour(image, contour)\n rc_utils.draw_circle(image, contour_center)\n\n else:\n contour_center = None\n contour_area = 0\n\n # Display the image to the screen\n rc.display.show_color_image(image)", "def preprocessing(self, img):\n [a, contours, c] = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours", "def compute_moments_and_cgf(self, phi, mask=True):\n\n # Solve the Kalman filtering and smoothing problem\n y = phi[0]\n A = -2*phi[1]\n # Don't multiply phi[2] by two because it is a sum of the super- and\n # sub-diagonal blocks so we would need to divide by two anyway.\n B = -phi[2]\n\n (CovXnXn, CovXpXn, Xn, ldet) = linalg.block_banded_solve(A, B, y)\n\n # Compute moments\n u0 = Xn\n u1 = CovXnXn + Xn[...,:,np.newaxis] * Xn[...,np.newaxis,:]\n u2 = CovXpXn + Xn[...,:-1,:,np.newaxis] * Xn[...,1:,np.newaxis,:]\n u = [u0, u1, u2]\n\n # Compute cumulant-generating function\n g = -0.5 * np.einsum('...ij,...ij', u[0], phi[0]) + 0.5*ldet\n\n return (u, g)", "def find_center_mass(contour):\n M = cv2.moments(contour)\n if M[\"m00\"] == 0:\n (x, y), _ = cv2.minEnclosingCircle(contour)\n cR = int(y)\n cC = int(x)\n # raise ValueError(\"Contour too small to find a new center.\")\n else:\n cR = int(M[\"m01\"] / M[\"m00\"])\n cC = int(M[\"m10\"] / M[\"m00\"])\n return (cR, cC)", "def setup_flux_contours(self):\n e = self.e\n \n self.npsi = len(e.getRmidPsi()[self.tind])\n self.psigrid = np.linspace(0, 1, self.npsi)\n \n refineFactor = 4\n \n # This is the default psigrid to evaluate on\n psigridRefine = np.linspace(0, 1, self.npsi * refineFactor)\n self.psigridEv = (psigridRefine[1:] + psigridRefine[:-1])/2.0\n self.dpsigridEv = np.diff(psigridRefine)\n \n # This is an array of numpy arrays of (R,Z) coordinates which poloidally\n # parameterize the flux surface. 
Note that the last point is not equal\n # to the first point, so if a closed contour is desired, the last point\n # must be repeated\n self.psicontours = [None]*(len(self.psigridEv))\n \n # The following two are the length element (ds) and the flux surface\n # area element (ds/Bp) for each flux surface element given in psicontours.\n # Note that these are calculated as centered on the given R,Z coordinates.\n self.psicontours_ds = [None]*(len(self.psigridEv))\n # Note an additional very important fact; Bp has been normalized such\n # that psi_axis = 0 and psi_LCFS = 1!!!\n self.psicontours_dsbp = [None]*(len(self.psigridEv))\n \n rgrid = e.getRGrid()\n zgrid = e.getZGrid()\n \n # Begin by loading up basic magnetic geometry information\n self.magR = e.getMagR()[self.tind]\n self.magZ = e.getMagZ()[self.tind]\n self.magRZ = np.array([self.magR, self.magZ])\n \n self.fluxRZ = e.getFluxGrid()[self.tind,:,:]\n \n psiMin = e.getFluxAxis()[self.tind]\n psiMax = e.getFluxLCFS()[self.tind]\n self.psiRange = psiMax-psiMin\n \n self.psinormRZ = (self.fluxRZ-psiMin)/self.psiRange\n \n # Set up interpolation functions for the poloidal flux\n self.psifunc = scipy.interpolate.RectBivariateSpline(rgrid, zgrid, self.fluxRZ.T, kx=2, ky=2)\n self.psinormfunc = scipy.interpolate.RectBivariateSpline(rgrid, zgrid, self.psinormRZ.T, kx=2, ky=2)\n \n # Set up an upscaled version of the poloidal flux, since the conour finder\n # performs better on the upscaled version\n rgrid_upscaled = np.linspace(rgrid[0], rgrid[-1], len(rgrid)*4)\n zgrid_upscaled = np.linspace(zgrid[0], zgrid[-1], len(zgrid)*8)\n rmesh, zmesh = np.meshgrid(rgrid_upscaled, zgrid_upscaled)\n \n rfunc = scipy.interpolate.interp1d(range(len(rgrid_upscaled)), rgrid_upscaled)\n zfunc = scipy.interpolate.interp1d(range(len(zgrid_upscaled)), zgrid_upscaled)\n psinormRZ_upscaled = self.psinormfunc.ev(rmesh, zmesh)\n \n \n # Use skimage to get contours easily for us\n for i in range(len(self.psigridEv)):\n contours = measure.find_contours(psinormRZ_upscaled, self.psigridEv[i])\n if len(contours) == 1:\n self.psicontours[i] = np.array([rfunc(contours[0][:,1]), zfunc(contours[0][:,0])])\n elif len(contours) > 1:\n minDist = np.inf\n for c in contours:\n rz = np.array([rfunc(c[:,1]), zfunc(c[:,0])])\n \n if np.linalg.norm(np.mean(rz, axis=1) - self.magRZ) < minDist:\n self.psicontours[i] = rz\n minDist = np.linalg.norm(np.mean(rz, axis=1) - self.magRZ)\n else:\n self.psicontours[i] = np.array([self.magRZ]).T\n \n if self.plotting:\n plt.figure()\n \n # Resample the contours such that the point spacings are of approximately equal arclength\n for i in range(len(self.psigridEv)):\n cRZ = self.psicontours[i]\n \n ctheta = np.unwrap(np.arctan2(cRZ[1] - self.magZ, cRZ[0] - self.magR))\n cr = np.linalg.norm(cRZ - self.magRZ[:,np.newaxis], axis=0)\n \n dtheta = np.unwrap(np.diff(ctheta))\n dr = np.diff(cr)\n r = (cr[1:] + cr[:-1])/2.0\n \n # Calculate total contour arclength using ds**2 = dr**2 + (r*dtheta)**2\n ds = np.sqrt(dr**2 + (r*dtheta)**2)\n cs = np.concatenate(([0], np.cumsum(ds)))\n \n cthetaFunc = scipy.interpolate.interp1d(cs, ctheta)\n crFunc = scipy.interpolate.interp1d(cs, cr, kind='quadratic')\n \n # Resample to an odd number of points\n resamp_theta = cthetaFunc(np.linspace(0, cs[-1], (len(cr)/2)*2+1))\n resamp_r = crFunc(np.linspace(0, cs[-1], (len(cr)/2)*2+1))\n \n dtheta2 = np.diff(resamp_theta[::2])\n dr2 = np.diff(resamp_r[::2])\n \n cr2 = resamp_r[1::2]\n ctheta2 = resamp_theta[1::2]\n \n ds2 = np.sqrt(dr2**2 + (cr2*dtheta2)**2)\n \n 
resampled_cRZ = np.array([np.cos(ctheta2), np.sin(ctheta2)]*cr2[np.newaxis,:]) + self.magRZ[:,np.newaxis]\n \n if self.plotting:\n plt.plot(resampled_cRZ[0,:], resampled_cRZ[1,:], marker='.')\n \n self.psicontours[i] = resampled_cRZ\n self.psicontours_ds[i] = ds2\n \n # This equation is abs(del(psi) cross del(toroidal angle))\n br_norm = -self.psinormfunc.ev(resampled_cRZ[0,:], resampled_cRZ[1,:], dy=1)\n bz_norm = self.psinormfunc.ev(resampled_cRZ[0,:], resampled_cRZ[1,:], dx=1)\n bp_norm = np.sqrt(br_norm**2 + bz_norm**2)/resampled_cRZ[0,:]\n self.psicontours_dsbp[i] = ds2/bp_norm", "def huMoments(cnt):\n\treturn cv2.HuMoments(moments(cnt))", "def moments(data):\n\n data = np.absolute(data)\n total = data.sum()\n X = np.indices(data.shape)\n x = (X*data).sum()/total\n width = np.sqrt((((X-x)**2)*data).sum()/data.sum())\n m_max = data.max()\n m_min = data.min()\n if np.absolute(m_max) >= np.absolute(m_min):\n height = m_max\n else:\n height = m_min\n return height, x, width", "def moments(path):\n\n g = from_file(path)\n\n h = 1.0 - g\n\n m1 = bgy3d.moments1(h)\n\n # Get the center of distribution\n center = m1[1:4] / m1[0]\n\n # Use center to compute 2nd momenta\n m2 = bgy3d.moments2nd(h, center)\n\n print \"Moments from\", path\n print \"<1> = \", m1[0]\n print \"<x> = \", m1[1] / m1[0]\n print \"<y> = \", m1[2] / m1[0]\n print \"<z> = \", m1[3] / m1[0]\n print \"<xy> = \", m2[0] / m1[0]\n print \"<yz> = \", m2[1] / m1[0]\n print \"<zx> = \", m2[2] / m1[0]\n print \"<z^2 - 1/3 * r^2> = \", m2[3] / m1[0]\n print \"<x^2 - y^2> = \", m2[4] / m1[0]\n print \"<r^2> = \", m2[5] / m1[0]", "def complexMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Mrrr = np.sum(rowgrid**3*IWrow)/IWsum\n Mccc = np.sum(colgrid**3*IWcol)/IWsum\n Mrrc = np.sum(np.outer(rowgrid**2,colgrid)*IWmat)/IWsum\n Mrcc = np.sum(np.outer(rowgrid,colgrid**2)*IWmat)/IWsum\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n M31 = complex(3*Mc - (Mccc+Mrrc)/sigma**2, 3*Mr - (Mrcc + Mrrr)/sigma**2)\n M33 = complex(Mccc-3*Mrrc, 3.*Mrcc - Mrrr)\n return M20, M22, M31, M33", "def process(img):\n global start\n frame = cv2.GaussianBlur(img, (21, 21), 0)\n fgmask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n fgmask = cv2.absdiff(start, fgmask)\n avg = max(np.average(fgmask), 10)\n fgmask = cv2.dilate(fgmask, None, iterations=2)\n ret, fgmask = cv2.threshold(fgmask, avg, 255, cv2.THRESH_BINARY)\n image, contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n bigContours = []\n for contour in contours:\n if cv2.contourArea(contour) >= 3000:\n 
bigContours.append(contour)\n\n ax = 0\n ay = 0\n for contour in bigContours:\n moments = cv2.moments(contour)\n cx = int(moments['m10']/moments['m00'])\n cy = int(moments['m01']/moments['m00'])\n ax += cx\n ay += cy\n if not bigContours:\n speed = 0\n else:\n ax /= len(bigContours)\n ay /= len(bigContours)\n my, mx, channels = img.shape\n my /= 2\n mx /= 2\n dist = math.sqrt((ax - mx)**2 + (ay - my)**2)\n speed = max(min((mx - dist) / my, 1), 0.1)\n if speed > 0.8:\n speed = 1\n return speed", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def complex2ndMoments(data=None,sigma=None):\n 
nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Cm = np.matrix([[Mcc,Mrc],[Mrc,Mrr]])\n Cw = np.matrix([[sigma**2,0.],[0.,sigma**2]])\n Cimg = (Cm.I - Cw.I).I\n Mcc = Cimg[0,0]\n Mrr = Cimg[1,1]\n Mrc = Cimg[0,1]\n #M20 = Mrr + Mcc\n #M22 = complex(Mcc - Mrr,2*Mrc)\n return Mcc, Mrr, Mrc", "def contours(self, image,debug=False):\n imgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n if debug: cv2.imwrite('debug_pics/gray_scale_contour.jpg',imgray) # cv2.imshow('gray_scale_contour',imgray)\n im2, contours, hierarchy = cv2.findContours(imgray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n return contours,hierarchy", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))", "def Predict_Image_Contours(img, mask_full, feature_dict, filename = None, path = ''):\n for ii in range(len(feature_dict)):\n Type = feature_dict[str(ii)] ## So first key is 1\n if Type=='modern_build':\n color_rgb = (255,0,0)\n elif Type=='trad_build':\n color_rgb = (0,0,255)\n mask = mask_full[:,:,ii]\n mask = 255*mask.round().astype('uint8')\n mask = np.stack((mask,mask, mask),-1)\n mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY);\n ret, thresh = cv2.threshold(mask, 127.5, 255, cv2.THRESH_BINARY)\n\n contours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n ##print('here')\n area_thresh =30 ## Depends on what the minimal building size desired is\n for cnt in contours:\n ## Contours, flag of whether curve is closed or not\n epsilon = 0.025*cv2.arcLength(cnt,True)\n ## Contours, epsilon for wiggliness, closed shape or not\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n ## Extract Area Dest image, contours, contour index\n area = cv2.contourArea(approx)\n ## centroid computed from moments\n M = cv2.moments(cnt) \n if area > area_thresh:\n if Type=='modern_build':\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n img = cv2.drawContours(image = img, contours = [box], \n contourIdx = 0, color = color_rgb, \n thickness = 2)\n elif Type=='trad_build':\n (x,y),radius = cv2.minEnclosingCircle(cnt)\n center = (int(x),int(y))\n radius = int(radius)\n img = cv2.circle(img,center,radius,color_rgb,2)\n elif Type=='Forest':\n img = cv2.drawContours(image = img, contours = [cnt], \n contourIdx = 0, color = color_rgb, \n thickness = 2)\n elif Type=='Bare':\n img = cv2.drawContours(image = img, contours = [cnt], \n contourIdx = 0, color = color_rgb, \n thickness = 2)\n if filename is not None:\n try: \n if path == '':\n path = 'Predictions'\n 
os.makedirs(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(img[:,:,0:3])\n plt.tight_layout()\n plt.savefig(path + '/' + filename, bbox_inches='tight') \n plt.close(fig)\n \n return img", "def get_cntr_points_center(self, contour, img_size):\n res_arr = []\n \n # first border line\n for row in range(img_size):\n tmp_arr = []\n curr_row = row\n curr_col = 0\n while curr_row != -1:\n if cv2.pointPolygonTest(contour,(curr_row,curr_col),True)> 0:\n tmp_arr.append([curr_row,curr_col])\n curr_row = curr_row -1\n curr_col = curr_col +1\n if len(tmp_arr):\n res_arr.append(tmp_arr)\n \n # second border line\n for row in range(img_size):\n tmp_arr = []\n curr_row = row\n curr_col = img_size -1\n while curr_row != img_size:\n if cv2.pointPolygonTest(contour,(curr_row,curr_col),True)> 0:\n tmp_arr.append([curr_row,curr_col])\n curr_row = curr_row +1\n curr_col = curr_col -1\n if len(tmp_arr):\n res_arr.append(tmp_arr)\n return res_arr", "def cluster_contours(device, img, roi_objects, nrow=1, ncol=1, debug=None):\n\n device += 1\n\n if len(np.shape(img)) == 3:\n iy, ix, iz = np.shape(img)\n else:\n iy, ix, = np.shape(img)\n\n # get the break groups\n\n if nrow == 1:\n rbreaks = [0, iy]\n else:\n rstep = np.rint(iy / nrow)\n rstep1 = np.int(rstep)\n rbreaks = range(0, iy, rstep1)\n if ncol == 1:\n cbreaks = [0, ix]\n else:\n cstep = np.rint(ix / ncol)\n cstep1 = np.int(cstep)\n cbreaks = range(0, ix, cstep1)\n\n # categorize what bin the center of mass of each contour\n\n def digitize(a, step):\n if isinstance(step, int) == True:\n i = step\n else:\n i = len(step)\n for x in range(0, i):\n if x == 0:\n if a >= 0 and a < step[x + 1]:\n return x + 1\n elif a >= step[x - 1] and a < step[x]:\n return x\n elif a > step[x - 1] and a > np.max(step):\n return i\n\n dtype = [('cx', int), ('cy', int), ('rowbin', int), ('colbin', int), ('index', int)]\n coord = []\n for i in range(0, len(roi_objects)):\n m = cv2.moments(roi_objects[i])\n if m['m00'] == 0:\n pass\n else:\n cx = int(m['m10'] / m['m00'])\n cy = int(m['m01'] / m['m00'])\n # colbin = np.digitize(cx, cbreaks)\n # rowbin = np.digitize(cy, rbreaks)\n colbin = digitize(cx, cbreaks)\n rowbin = digitize(cy, rbreaks)\n a = (cx, cy, colbin, rowbin, i)\n coord.append(a)\n coord1 = np.array(coord, dtype=dtype)\n coord2 = np.sort(coord1, order=('colbin', 'rowbin'))\n\n # get the list of unique coordinates and group the contours with the same bin coordinates\n\n groups = []\n for i, y in enumerate(coord2):\n col = y[3]\n row = y[2]\n location = str(row) + ',' + str(col)\n groups.append(location)\n\n unigroup = np.unique(groups)\n coordgroups = []\n\n for i, y in enumerate(unigroup):\n col = int(y[0])\n row = int(y[2])\n for a, b in enumerate(coord2):\n if b[2] == col and b[3] == row:\n grp = i\n contour = b[4]\n coordgroups.append((grp, contour))\n else:\n pass\n\n coordlist = [[y[1] for y in coordgroups if y[0] == x] for x in range(0, (len(unigroup)))]\n\n contours = roi_objects\n grouped_contour_indexes = coordlist\n\n # Debug image is rainbow printed contours\n\n if debug == 'print':\n if len(np.shape(img)) == 3:\n img_copy = np.copy(img)\n else:\n iy, ix = np.shape(img)\n img_copy = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n rand_color = color_palette(len(coordlist))\n for i, x in enumerate(coordlist):\n for a in x:\n cv2.drawContours(img_copy, roi_objects, a, rand_color[i], -1, lineType=8)\n print_image(img_copy, (str(device) + '_clusters.png'))\n\n elif debug == 'plot':\n if len(np.shape(img)) == 3:\n 
img_copy = np.copy(img)\n else:\n iy, ix = np.shape(img)\n img_copy = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n rand_color = color_palette(len(coordlist))\n for i, x in enumerate(coordlist):\n for a in x:\n cv2.drawContours(img_copy, roi_objects, a, rand_color[i], -1, lineType=8)\n plot_image(img_copy)\n\n return device, grouped_contour_indexes, contours", "def moment_3d(im, mesh, thresh=0):\n x = []\n y = []\n z = []\n for mark in range(im.shape[3]):\n # get normalized probabilities\n im_norm = (im[:, :, :, mark] * (im[:, :, :, mark] >= thresh)) / np.sum(\n im[:, :, :, mark] * (im[:, :, :, mark] >= thresh)\n )\n x.append(np.sum(mesh[0] * im_norm))\n y.append(np.sum(mesh[1] * im_norm))\n z.append(np.sum(mesh[2] * im_norm))\n return x, y, z", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def moments3d(data):\n # Find total for all values in the data.\n total = data.sum()\n \n # Make index matrices.\n Z, X, Y = np.indices(data.shape)\n \n # Find mean positions in each dimension by weighted average (weight is intensity, index is position)\n z = (Z*data).sum()/total\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n \n # Estimate width in each dimension. Procedure is to fix the other two dimensions at their mean\n # and retrieve a single column in the dimension of interest through the peak. Visually, in a Z-\n # stack you would determine the X,Y position of the center of the peak, then we're drawing a line\n # (or a bar) in Z through that point. This becomes a simple 1D profile of intensity as a function\n # of Z position. 
Standard deviation of this 1D vector about z (mean Z position) is computed.\n z_col = data[:, int(x), int(y)] #single column through z with x and y fixed at their means.\n width_z = np.sqrt(np.abs((np.arange(z_col.size)-z)**2*z_col).sum()/z_col.sum())\n x_col = data[int(z), :, int(y)] #single column through x with z and y fixed at their means.\n width_x = np.sqrt(np.abs((np.arange(x_col.size)-x)**2*x_col).sum()/x_col.sum())\n y_col = data[int(z), int(x), :] #single column through y with z and x fixed at their means.\n width_y = np.sqrt(np.abs((np.arange(y_col.size)-y)**2*y_col).sum()/y_col.sum())\n \n # Estimator height from max value.\n height = data.max()\n return z, x, y, height, width_z, width_x, width_y", "def get_cntr_points_from_center(self, contour, img_size):\n res_arr = []\n half_img_size = int(np.round(img_size/3))\n diag_coord = np.random.randint(half_img_size,2*half_img_size)\n # find the center point\n center_point = [diag_coord,diag_coord]\n # define all borders point\n border = ([[0,i] for i in np.arange(0,img_size)] + \n [[img_size,i] for i in np.arange(0,img_size)] +\n [[i,0] for i in np.arange(0,img_size)] + \n [[i,img_size] for i in np.arange(0,img_size)])\n for point in border:\n tmp_res = []\n row_diff = point[0] - center_point[0]\n col_diff = point[1] - center_point[1]\n # find the distance between center point and border point\n dist = int(np.round(np.sqrt(row_diff*row_diff+col_diff*col_diff)))\n # find steps for row and column to go from center to border\n row_step = row_diff/dist\n col_step = col_diff/dist\n \n row_diff = center_point[0]\n col_diff = center_point[1]\n c_point = [int(np.round(row_diff)),\n int(np.round(col_diff))]\n\n # go from center to border until points inside image\n while ((c_point[0]>= 0) \n and (c_point[0] < img_size) \n and (c_point[1]>= 0) \n and (c_point[1] < img_size)):\n if cv2.pointPolygonTest(contour,(c_point[0],c_point[1]),True)> 0:\n tmp_res.append(c_point)\n row_diff = row_diff + row_step\n col_diff = col_diff + col_step\n c_point = [int(np.round(row_diff)),\n int(np.round(col_diff))]\n if len(tmp_res):\n res_arr.append(tmp_res)\n return res_arr", "def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n accu_0 += x * x\n accu_1 += x * y\n accu_2 += x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n 
scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_4 + cov_mat_8 # trace of the covariance matrix (sum of its eigenvalues)\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)", "def _compute_layer_moments(x):\n return torch.mean(x, dim=(1, 2, 3), keepdim=True), torch.var(x, dim=(1, 2, 3), keepdim=True)", "def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]", "def image_proc(self):\r\n res_erode, res_in_rect = self._get_res_image()\r\n\r\n Moment_rect = cv2.moments(res_in_rect)\r\n if (Moment_rect[\"m00\"] != 0):\r\n self.rect_x = int(Moment_rect[\"m10\"] / Moment_rect[\"m00\"])\r\n self.rect_y = int(Moment_rect[\"m01\"] / Moment_rect[\"m00\"])\r\n\r\n # in order to find the centers of the rectangle.\r\n cnts, hierarchy = cv2.findContours(res_erode,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for i in cnts:\r\n #Moment = cv2.moments(cnts)\r\n moment = cv2.moments(res_erode)\r\n self.cx = int(moment[\"m10\"] / moment[\"m00\"])\r\n self.cy = int(moment[\"m01\"] / moment[\"m00\"])\r\n cv2.circle(output_image,(self.cx,self.cy),7,(255,255,255),-1)\r\n cv2.putText(output_image, \"center\", (self.cx, self.cy), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\r\n #print(cX,cY)\r", "def props_for_contours(contours, ary):\n c_info = []\n for c in contours:\n x, y, w, h = cv2.boundingRect(c)\n c_im = np.zeros(ary.shape)\n cv2.drawContours(c_im, [c], 0, 255, -1)\n c_info.append({\n 'x1': x,\n 'y1': y,\n 'x2': x + w - 1,\n 'y2': y + h - 1,\n 'sum': np.sum(ary * (c_im > 0)) / 255\n })\n return c_info", "def get_sun_features(image): # Use grayscale images, outside val: NaN\r\n ratio = sun_isoperimetric_ratio(image)\r\n sun_features = {\"sun_circularity_ratio\": ratio}\r\n return sun_features", "def centres_of_mass_2D(image):\n centroids = []\n bords = []\n areas = []\n radius = []\n \n for info in measure.regionprops(image, ['Centroid', 'BoundingBox', 'Area', 'equivalent_diameter', 'Label']): \n \n # Skip wrong regions\n index = np.where(image==info['Label'])\n if index[0].size==0 and index[1].size==0:\n continue\n \n # TODO: change this value\n if info['Area'] > image.shape[0] / 4.:\n \n \n centre = info['Centroid']\n D = info['equivalent_diameter']\n \n #min_row, min_col, max_row, max_col = info['BoundingBox']\n #a1 = int((max_row - min_row) / 2.)\n #a2 = int((max_col - min_col) / 2.)\n \n #box_cent = (a1 + min_row, a2 + min_col)\n \n radius.append(round(D / 2.0, 
3))\n centroids.append( (round(centre[0], 3),round(centre[1], 3)) )\n #bords.append(box_cent)\n\n return [centroids, radius]", "def getCenterOfMass(self, filtered = True):\n n_time = len(self.pos)\n x_mean = [0.0,]*n_time\n y_mean = [0.0,]*n_time\n z_mean = [0.0,]*n_time \n for frame in range(n_time):\n # get all the positions of the filtered points\n x,y,z = self.getAllPositions(frame, filtered)\n x_mean[frame] = np.asarray(x).mean() if len(x) > 0 else None\n y_mean[frame] = np.asarray(y).mean() if len(y) > 0 else None\n z_mean[frame] = np.asarray(z).mean() if len(z) > 0 else None\n\n return x_mean, y_mean, z_mean", "def select_contours(img):\n # Find contours at a constant value\n contours = measure.find_contours(img, 300)\n # print(\"Found \" + str(len(contours)) + \" contour(s)\")\n # Select the nearest contours with respect to the center pixel of the image\n width = img.shape[1] # number of columms\n heigth = img.shape[0] # number of rows\n pixel_ref = (width / 2, heigth / 2)\n # Threshold distance is 10% of images smallest dimension\n dist_thresh = min(width, heigth) * 0.1\n contours_wanted = []\n pixel_mean_array = []\n for contour in contours:\n contour_3d = np.zeros([contour.shape[0], 3]) # 3rd dimension added for later conversion to patient coord space\n contour_3d[:, :2] = contour\n pixel_mean = np.mean(contour, axis=0)\n if distance.euclidean(pixel_ref, pixel_mean) <= dist_thresh:\n contours_wanted.append(contour_3d)\n pixel_mean_array.append(pixel_mean)\n # print(\"Set \" + str(len(contours_wanted)) + \" contours of interest\")\n return contours_wanted, pixel_mean_array", "def moments(self, x, y):\n # Let's attempt to remove noise by axing points\n # below 5% of the peak\n threshold = y.max() * 0.05\n clean = np.array(y)\n clean[clean < threshold] = 0\n zero = clean.sum()\n one = np.sum(x * clean)\n two = np.sum(x * x * clean)\n mean = one / zero\n var = two / zero - mean**2\n stdev = np.sqrt(var)\n return (mean, stdev)", "def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc", "def setup_contour_input():\n from bfieldtools.utils import load_example_mesh\n\n mesh = load_example_mesh(\"unit_disc\")\n\n r = np.linalg.norm(mesh.vertices, axis=1)\n scalars = (1 - r) ** 2\n scalars *= mesh.vertices[:, 0]\n\n return mesh, scalars", "def getContours(image, copyImage):\n contours, heirarchy = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n for contour in contours:\n area = cv.contourArea(contour)\n \n if area > 500.0:\n cv.drawContours(copyImage, contour, -1, (255,0,0),3)\n perimeter = cv.arcLength(contour, True)\n \n # Approximates to the nearest polygon\n approx = cv.approxPolyDP(contour,0.02*perimeter, True)\n objectCoordinates = len(approx)\n\n # Returns the x, y and height width of the polygon\n x, y, w, h = cv.boundingRect(approx)\n\n if objectCoordinates == 3:\n objectShape = \"Triangle\"\n elif objectCoordinates == 4:\n ratio = w / float(h)\n if ratio >= 0.95 and ratio <= 1.05:\n objectShape = \"Square\"\n else: objectShape = \"Rectangle\"\n else: objectShape = \"Circle\" \n\n \n\n 
# Draw rectangles around the images\n cv.rectangle(copyImage, (x,y), (x+w, y+h), (0,255,0), 2)\n cv.putText(copyImage, objectShape, (x + (w//2), y + (h//2)),cv.FONT_HERSHEY_COMPLEX, 0.5, (0,0,0))", "def get_objects(color, depth, threshold1, threshold2):\n\n gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n surf = cv2.xfeatures2d.SURF_create(500)\n\n # find and draw the keypoints\n kp = surf.detect(blur,None)\n\n pts = [p.pt for p in kp]\n xpts = []\n ypts = []\n\n # evaluate the keypoints and only save the keypoints who are between the given threshold\n depth_values = []\n for i in range(0,len(pts)):\n xco = int(pts[i][0])\n yco = int(pts[i][1])\n depth_value = depth[yco][xco]\n if depth_value >= float(threshold1) and depth_value <= float(threshold2):\n xpts.append(xco)\n ypts.append(yco)\n depth_values.append(depth_value)\n\n # make histogram of x coordinates of the saved keypoints\n n, distr, _ = plt.hist(xpts)\n plt.savefig('hist.png')\n\n # evaluate the histogram and make seperate arrays for the different objects\n objectarray = []\n temp = []\n for i in range(len(n)):\n if n[i] > 0:\n temp.append(distr[i])\n temp.append(distr[i+1])\n else:\n if len(temp)!=0:\n objectarray.append(temp)\n temp = []\n objectarray.append(temp)\n\n objects = []\n\n # determine the objects with the previous calculated arrays\n for i in range(len(objectarray)):\n y_values = []\n min_x = int(np.amin(objectarray[i]))\n max_x = int(np.amax(objectarray[i]))\n\n for j in range(len(xpts)):\n if xpts[j] > min_x and xpts[j] < max_x:\n y_values.append(ypts[j])\n\n min_y = int(np.amin(y_values))\n max_y = int(np.amax(y_values))\n x = min_x\n y = min_y\n w = max_x - min_x\n h = max_y - min_y\n\n depth_mean = round(get_depth_mean(depth, x, y, w, h), 3)\n\n object = DetectedObject(x, y, w, h, depth_mean)\n objects.append(object)\n\n return objects", "def detect_contours(self):\r\n (contours, _) = cv2.findContours(self.image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n return [DbugContour(cv_contour=contour) for contour in contours]", "def calculate_contours_fit(L_x, L_y, e, leaflet, ts, Plots, side):\n \n n = np.load(input_dir + 'directors_'+leaflet+'_tail_'+ str(ts) + '.npy') \n\n pos = np.load(input_dir + 'coordinates_'+leaflet+'_tail_' + str(ts) + '.npy') \n\n resid = np.load(input_dir + 'residues_'+leaflet+'_tail_' + str(ts) + '.npy')\n box = np.load(input_dir + 'box' + str(ts) + '.npy')\n\n \n chl = np.load(input_dir + 'cholesterol_'+leaflet+'_tail_' + str(ts) + '.npy')\n dlipc = np.load(input_dir + 'dlipc_'+leaflet+'_tail_' + str(ts) + '.npy') \n dspc = np.load(input_dir + 'dspc_'+leaflet+'_tail_' + str(ts) + '.npy')\n ssm = np.load(input_dir + 'ssm_'+leaflet+'_tail_' + str(ts) + '.npy')\n \n #n= np.ones(len(pos))\n \"\"\" END: read the input data \"\"\"\n\n\n field = order_vector_field(L_x, L_y, pos, n, e, box)\n\n c = pd.DataFrame(data=field).mean(axis=0).rolling(50, center=True, min_periods=1).mean() #50\n c.dropna(inplace=True)\n middle = 0.5*(np.max(c) + np.min(c)) \n #middle = 0.025\n contours = measure.find_contours(field, middle) # Marching Cubes algorith\n #save contours\n fac_x = box[0] / L_x #to get the right dimensions (range_x)\n fac_y = box[1] / L_y # (range_y)\n \n contours_x = []\n contours_y = []\n contours_x_y = []\n \n contours_all = []\n for m, contour in enumerate(contours):\n contours_x.append((contour[:, 1] * fac_x))\n contours_y.append((contour[:, 0] * fac_y))\n \n \n contours_x_y = np.column_stack((contours_x[m], contours_y[m]))\n 
contours_all.append(contours_x_y)\n np.save(output_contours + 'contours_'+leaflet+'.' + str(ts) + '.npy', contours_all)\n \n\n#===================================================\n#To assign resids to the different phases\n phase_belonging = np.zeros((len(pos)))\n ordered =[]\n disordered = []\n for i in np.arange(len(pos)):\n \n def apply_pbc(pos, box):\n if pos >= box:\n pos -= box\n if pos < 0:\n pos += box\n return pos\n \n idx_x = int(apply_pbc(pos[i,0], box[0]) / fac_x - 1.e-5) #the - 1.e-5 is because accuracy issue in the /\n idx_y = int(apply_pbc(pos[i,1], box[1]) / fac_y - 1.e-5) #this - 1.e-5 is because accuracy issue in the /\n #print(idx_x, idx_y)\n order= field[idx_y, idx_x]\n if (order > middle):\n ordered.append(order)\n order = 1 #ordered lipids\n \n else :\n disordered.append(order)\n order =0 #disordered lipids\n phase_belonging[i] = order\n \n\n resid_phases = np.column_stack((resid[:,0], phase_belonging))\n np.save(output_dir + 'resid_phases'+leaflet+'.'+ str(j) + '.npy', resid_phases)\n\n if Plots == True:\n plt.figure(figsize=(15,10)) \n \n contours_sorted = sorted(contours, key=len, reverse=True)\n \n for i in range(2):\n plt.plot(contours_sorted[i][:,1]* fac_x+0.5*fac_x, contours_sorted[i][:,0]* fac_y+0.5*fac_y, linewidth=3, color='#0000FF' ) ##00CC00\n \n #for m, contour in enumerate(contours_sorted):\n # print(contour[:,0])\n # for contour in contours: \n \n # plt.plot((contour[:, 1] * fac_x+0.5*fac_x),\n # (contour[:, 0] * fac_y+0.5*fac_y),\n # linewidth=4, color='#00CC00')\n \n plt.imshow(field, interpolation='nearest', \n cmap=plt.cm.gray_r,\n extent=[0, box[0], 0, box[1]], origin='lower', alpha=0.7) \n \n plt.axis('off')\n plot_scatter_order_field(pos, resid, dlipc, dspc, chl,ssm, n , box, ts, side) #phase_belonging.reshape(-1,1)\n plt.savefig(output_dir + 'contours-'+ leaflet + str(ts) + '.png', dpi=300) \n plt.close() \n \n return resid_phases #, ordered, disordered ", "def getContours(img):\n # mode: gets only exrteme outer contours\n # method: stores all contour points\n contours, hierarchy = cv2.findContours(img, mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_NONE)\n \n x, y, w, h = 0, 0, 0, 0\n\n # improving accuracy of contours\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 500:\n # assumes cnt with area > 500 is closed contour \n perimeter = cv2.arcLength(cnt, True)\n\n # approximate polygonal curve of cnt\n # set max difference between original and approxCurve as \n # 0.02 * perimeter\n approxCurve = cv2.approxPolyDP(cnt, 0.02 * perimeter, closed=True)\n \n x, y, w, h = cv2.boundingRect(approxCurve)\n # wand point is is center-left tip\n return x + (w // 2), y", "def findCentroid(img, file):\n \n print(file)\n \n # convert the image to grayscale\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # find the number of rows and columns of the image \n img_lin = img.shape[0]\n img_col = img.shape[1]\n \n # to find the average of half of the image \n img_mean = np.uint8(np.mean(img[0:700,:]))\n \n threshold_img = np.zeros_like(img)\n \n # we set a threshold to detect the fly's body at 70% of the average\n for i in range(1,img_lin): \n for j in range(1,img_col):\n if img[i,j] <= img_mean*0.70:\n threshold_img[i,j] = 255\n \n clean_threshold = threshold_img\n\n # erosion applied to remove unwanted details, like the lanes borders \n kernel = np.ones((3,3), np.uint8)\n img_erosion = cv2.erode(threshold_img, kernel, iterations=5)\n\n clean_erosion = img_erosion\n \n wings_img = np.zeros_like(img)\n\n # thresholding to detect the fly's body 
with wings \n for i in range(1,img_lin): \n for j in range(1,img_col):\n if img[i,j] <= img_mean*0.90 and img[i,j] >= img_mean*0.50:\n wings_img[i,j] = 255\n \n clean_wings = wings_img\n \n # erosion and dilation to the fly's body and wings \n wings_erosion = cv2.erode(wings_img, kernel, iterations=2)\n clean_wings_erosion = wings_erosion\n wings_erode_dilate = cv2.dilate(clean_erosion, kernel, iterations=10)\n \n final_img = np.zeros_like(img)\n\n for i in range(1,img_lin): \n for j in range(1,img_col):\n if wings_erode_dilate[i,j] == 255 and clean_wings[i,j] == 255 :\n final_img[i,j] = 255\n \n # final image with only the fly's wings\n final_img = final_img - clean_threshold\n \n img_sample = img_erosion\n\n # the centroid detection by using connected components \n output = cv2.connectedComponentsWithStats(img_sample, 4, cv2.CV_32S) \n \n x_centroid = int(output[3][1][0])\n y_centroid = int(output[3][1][1])\n \n remove_value = False\n add_value = False\n \n # we segment the image in two, based on the location of the centroid\n # we take a small square of pixels, to have a more precise detection\n # this square is 55 x 55 pixels around the centroid \n # (if the fly is not at the border)\n if x_centroid-55 > 0:\n part_left = final_img[30:100,(x_centroid-55):x_centroid]\n part_left_track = img_erosion[30:100,(x_centroid-55):x_centroid]\n remove_value = True\n else:\n part_left = final_img[30:100,:x_centroid]\n part_left_track = img_erosion[30:100,:x_centroid]\n \n if x_centroid+55 < len(final_img):\n part_right = final_img[30:100,x_centroid:x_centroid+55]\n part_right_track = img_erosion[30:100,x_centroid:x_centroid+55]\n add_value = True\n else:\n part_right = final_img[30:100,x_centroid:]\n part_right_track = img_erosion[30:100,x_centroid:]\n \n axis_left_x = part_left_track.shape[0] \n axis_left_y = part_left_track.shape[1]\n \n # we count the number of white pixels in the left part of the image\n white_left = 0\n \n for i in range(axis_left_x): \n for j in range(axis_left_y):\n if part_left[i,j] >= 50 :\n white_left = white_left + 1\n \n print('Part left scored : ' + str(white_left) + ' white pixels.')\n \n axis_right_x = part_right_track.shape[0]\n axis_right_y = part_right_track.shape[1]\n\n # we count the number of white pixels in the right part of the image\n white_right = 0\n \n for i in range(axis_right_x): \n for j in range(axis_right_y):\n if part_right[i,j] >= 50 :\n white_right = white_right + 1\n \n print('Part right scored : ' + str(white_right) + ' white pixels.')\n \n x_head = 0\n y_head = 0\n \n # the part having the smallest number of white pixels corresponds to the head\n \n if white_left < white_right:\n print('Head is in part left')\n for i in range(0,axis_left_x): \n for j in range(0,axis_left_y):\n if part_left_track[i,j] == 255 and x_head == 0:\n \n if remove_value:\n x_head = i+x_centroid-55\n else:\n x_head = i+x_centroid\n\n y_head = j\n print(\"head is in part left : \"+ str(np.array([i,j])))\n else:\n axis_x = part_right_track.shape[1]\n axis_y = part_right_track.shape[0]\n for i in reversed(range(0,axis_x)): \n for j in reversed(range(0,axis_y)):\n if part_right_track[j,i] == 255 and x_head == 0:\n \n if add_value:\n x_head = i+x_centroid+55\n else:\n x_head = i+x_centroid\n \n y_head = j\n print(\"head is in part right : \"+ str(np.array([i,j])))\n \n return x_centroid, y_centroid, x_head, y_head", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)", "def centroids(img):\n _, _, _, centr = 
cv2.connectedComponentsWithStats(img)\n return centr[1:]", "def sanitize_mask(orig_x, orig_y, mask):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Draw contours:\n cv2.drawContours(mask, contours, 0, (0, 255, 0), 2)\n # Calculate image moments of the detected contour\n num_objects = (len(contours))\n #threshold\n threshold = 3\n\n center_list = []\n # print(num_objects)\n if num_objects > 1:\n for item in range(num_objects):\n M = cv2.moments(contours[item])\n try:\n center_x = round(M['m10'] / M['m00'])\n center_y = round(M['m01'] / M['m00'])\n center_list.append([center_y , center_x ])\n except:\n pass\n\n # initialize retmask\n retmask = mask\n if num_objects > 1:\n for x, y in center_list:\n if orig_x - threshold <= x <= orig_x + threshold and orig_y - threshold <= y <= orig_y + threshold:\n pass\n else:\n def dfs_removal(px , py, mask):\n R = len(mask)\n C = len(mask[0])\n if mask[px][py ] != 255: \n return\n mask[px][py] = 0\n if 0 <= px - 1 and mask[px - 1][py ] == 255: dfs_removal(px - 1 , py , mask)\n if px + 1 < R and mask[px + 1][py ] == 255: dfs_removal(px + 1 , py , mask)\n if 0 <= py - 1 and mask[px][py - 1] == 255: dfs_removal(px, py -1 , mask)\n if py + 1 < C and mask[px][py + 1] == 255: dfs_removal(px, py + 1 , mask)\n\n dfs_removal(x,y, mask)\n\n return retmask", "def gen_centers(self):\n\n \"\"\"x_track = self.cs.discrete_rollout()\n t = np.arange(len(x_track))*self.dt\n # choose the points in time we'd like centers to be at\n c_des = np.linspace(0, self.cs.run_time, self.n_bfs)\n self.c = np.zeros(len(c_des))\n for ii, point in enumerate(c_des):\n diff = abs(t - point)\n self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]\"\"\"\n\n # desired activations throughout time\n des_c = jnp.linspace(0, self.cs.run_time, self.n_bfs)\n\n self.c = np.ones(len(des_c))\n for n in range(len(des_c)):\n # finding x for desired times t\n self.c[n] = jnp.exp(-self.cs.ax * des_c[n])\n self.c = jnp.array(self.c)", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. + cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_instance_moments(x):\n return torch.mean(x, dim=(2, 3), keepdim=True), torch.var(x, dim=(2, 3), keepdim=True)", "def centroid(self):\n return self.contours_to_matrix().mean(axis=0)", "def draw_contours(self, image, maskImg):\r\n # Required variables..\r\n x, y, width, height = 0, 0, 0, 0\r\n # Find contours..\r\n contours, hierarchy = cv2.findContours(image=maskImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE) # Playable Parameters..\r\n # Draw the contours..\r\n for contour in contours:\r\n # Calculate the area of the contour, so can remove unnecessary contours..\r\n area = cv2.contourArea(contour=contour)\r\n if area > 3000: # Playable adjustment..!! Found Good as 3000 for current light condition.. 
change this if light condition changes..\r\n # Draw the contours to the image -- actual frame..\r\n if self.debug_mode:\r\n cv2.drawContours(image=image, contours=contour, contourIdx=-1, color=(255, 255, 0), thickness=4)\r\n # Find the perimeter of the markers detected...\r\n perimeter = cv2.arcLength(curve=contour, closed=True)\r\n # Approximating/Finding the corners of the image from the obtained corners..\r\n approx_corners = cv2.approxPolyDP(curve=contour, epsilon=0.02 * perimeter, closed=True)\r\n # Find the bounding box rectangle for the approximated corners..\r\n x, y, width, height = cv2.boundingRect(approx_corners)\r\n # Return the values with which a rectangle can be drawn..\r\n return x, y, width, height", "def find_centroids(self, img, n=1):\n # Find contours\n contours = cv2.findContours(np.uint8(img), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Get centers and areas\n centers = []\n areas = []\n for c in contours:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / max(M[\"m00\"],1))\n cY = int(M[\"m01\"] / max(M[\"m00\"],1))\n centers.append([cX,cY])\n areas.append(cv2.contourArea(c))\n \n # Make sure we have enough contours\n detected=True\n while len(areas)<n:\n centers.append(None)\n areas.append(0)\n detected=False\n \n # Find top n sorted contours\n sorted_centers = []\n for i in np.argsort(-1*np.array(areas))[:n]:\n sorted_centers.append(centers[i])\n \n return np.array(sorted_centers), detected", "def compute_covar_from_instance_centroids(instance_centroids):\n\n cov_mat_allStructures = {}\n radii_allStructures = {}\n ellipsoid_matrix_allStructures = {}\n for name_s, centroids in sorted(instance_centroids.items()):\n centroids2 = np.array(centroids)\n cov_mat = np.cov(centroids2.T)\n cov_mat_allStructures[name_s] = cov_mat\n u, s, vt = np.linalg.svd(cov_mat)\n # print name_s, u[:,0], u[:,1], u[:,2],\n radii_allStructures[name_s] = np.sqrt(s)\n ellipsoid_matrix_allStructures[name_s] = vt\n\n return cov_mat_allStructures, radii_allStructures, ellipsoid_matrix_allStructures", "def xy_center_directions(contours, image: ndarray):\n return contours_.calculate_normalized_screen_space(contours, image)", "def get_center_of_mass_allies(self,obs):", "def plotContours(self, isNormalize=True, altLevels=np.arange(-30., 50., 5.), aziLevels=np.arange(0., 120., 5.),\r\n isPlottingBorder=True, inline=False, lineWidth=3, figSize=(12, 12), fontSize=15, altAxis=None,\r\n aziAxis=None):\r\n\r\n if not hasattr(self, 'altPosMapf'):\r\n self._getSignMap()\r\n\r\n altPosMap = self.altPosMapf\r\n aziPosMap = self.aziPosMapf\r\n\r\n if isNormalize:\r\n altPosOrigin, aziPosOrigin = self.getVisualFieldOrigin()\r\n altPosMap = altPosMap - altPosOrigin\r\n aziPosMap = aziPosMap - aziPosOrigin\r\n\r\n if hasattr(self, 'vasculatureMap') and type(self.vasculatureMap) != type(None) and isPlottingBorder:\r\n zoom = self.vasculatureMap.shape[0] / altPosMap.shape[0]\r\n altPosMap = ni.zoom(altPosMap, zoom)\r\n aziPosMap = ni.zoom(aziPosMap, zoom)\r\n totalMask = ni.zoom(self._generateTotalMask().astype(np.float32), zoom)\r\n altPosMap[totalMask < 0.5] = np.nan\r\n aziPosMap[totalMask < 0.5] = np.nan\r\n else:\r\n totalMask = self._generateTotalMask()\r\n altPosMap[totalMask == 0] = np.nan\r\n aziPosMap[totalMask == 0] = np.nan\r\n\r\n X, Y = np.meshgrid(np.arange(altPosMap.shape[1]),\r\n np.arange(altPosMap.shape[0]))\r\n\r\n # plotting altitute contours\r\n if not altAxis:\r\n altf = plt.figure(figsize=figSize, facecolor='#ffffff')\r\n altAxis = 
altf.add_subplot(111)\r\n\r\n altContour = altAxis.contour(X,\r\n Y,\r\n altPosMap,\r\n inline=inline,\r\n levels=altLevels,\r\n linewidths=lineWidth)\r\n\r\n if inline:\r\n altContour.clabel(inline=inline, fontsize=fontSize, fmt='%1.1f')\r\n else:\r\n altAxis.get_figure().colorbar(altContour)\r\n\r\n if isPlottingBorder:\r\n self.plotFinalPatchBorders(plotAxis=altAxis,\r\n plotName=False,\r\n isTitle=False,\r\n isColor=False,\r\n borderWidth=lineWidth,\r\n interpolation='bilinear')\r\n\r\n altAxis.set_title('Altitute Positions')\r\n\r\n # plotting azimuth contours\r\n if not aziAxis:\r\n azif = plt.figure(figsize=figSize, facecolor='#ffffff')\r\n aziAxis = azif.add_subplot(111)\r\n\r\n aziContour = aziAxis.contour(X,\r\n Y,\r\n aziPosMap,\r\n inline=inline,\r\n levels=aziLevels,\r\n linewidths=lineWidth)\r\n if inline:\r\n aziContour.clabel(inline=1, fontsize=fontSize, fmt='%1.1f')\r\n else:\r\n aziAxis.get_figure().colorbar(aziContour)\r\n\r\n if isPlottingBorder:\r\n self.plotFinalPatchBorders(plotAxis=aziAxis,\r\n plotName=False,\r\n isTitle=False,\r\n isColor=False,\r\n borderWidth=lineWidth,\r\n interpolation='bilinear')\r\n\r\n aziAxis.set_title('Azimuth Positions')\r\n\r\n return altAxis, aziAxis", "def add_temp_clim_normals(gdf_of_interest,\n grid_of_minmax_temp_clim_norm_y = clim_norm_minmax_temp_y_np_unique, \n grid_of_minmax_temp_clim_norm_x = clim_norm_minmax_temp_x_np_unique, \n grid_of_mean_temp_clim_norm_y = clim_norm_mean_temp_y_np_unique, \n grid_of_mean_temp_clim_norm_x = clim_norm_mean_temp_x_np_unique): \n mean_monthly_min = []\n mean_monthly_max = []\n mean_monthly_temp = []\n for index in gdf_of_interest.index:\n # Find the closest x and y grid points for the mean min/max temperatures\n closest_y_index = find_nearest_point_1D(grid_of_minmax_temp_clim_norm_y, \n gdf_of_interest.geometry.y[index], \n print_nearest_val=False)\n closest_x_index = find_nearest_point_1D(grid_of_minmax_temp_clim_norm_x, \n gdf_of_interest.geometry.x[index], \n print_nearest_val=False)\n \n # Find the closest x and y grid points for the monthly mean temperature\n closest_y_index_mean = find_nearest_point_1D(grid_of_mean_temp_clim_norm_y, \n gdf_of_interest.geometry.y[index], \n print_nearest_val=False)\n closest_x_index_mean = find_nearest_point_1D(grid_of_mean_temp_clim_norm_x, \n gdf_of_interest.geometry.x[index], \n print_nearest_val=False)\n \n \n \n # Find the month of interest and define the correct format for the different file formats\n month_of_interest = int(gdf_of_interest[\"Month\"][0])\n min_month_of_int_format = \"Tn_m\" + str(month_of_interest)\n max_month_of_int_format = \"Tx_m\" + str(month_of_interest)\n mean_month_of_int_format = \"Tm_m\" + str(month_of_interest)\n \n # Append relevant climate normal data\n mean_monthly_min.append(gdf_clim_norm_temp_TN.loc[\n (gdf_clim_norm_temp_TN[\"east\"] == grid_of_minmax_temp_clim_norm_x[closest_x_index]) &\n (gdf_clim_norm_temp_TN[\"north\"] == grid_of_minmax_temp_clim_norm_y[closest_y_index]),\n min_month_of_int_format].values[0])\n \n mean_monthly_max.append(gdf_clim_norm_temp_TX.loc[\n (gdf_clim_norm_temp_TX[\"east\"] == grid_of_minmax_temp_clim_norm_x[closest_x_index]) &\n (gdf_clim_norm_temp_TX[\"north\"] == grid_of_minmax_temp_clim_norm_y[closest_y_index]),\n max_month_of_int_format].values[0])\n \n # NOTE: We currently do not have the NI data so assume that for NI stations\n # the mean value is in the middle of the min/max values.\n if (len(gdf_clim_norm_temp_mean.loc[\n (gdf_clim_norm_temp_mean[\"east\"] == 
grid_of_mean_temp_clim_norm_x[closest_x_index_mean]) &\n (gdf_clim_norm_temp_mean[\"north\"] == grid_of_mean_temp_clim_norm_y[closest_y_index_mean]),:]) == 0):\n mean_monthly_temp.append(np.mean([mean_monthly_min[-1], mean_monthly_max[-1]]))\n else:\n mean_monthly_temp.append(gdf_clim_norm_temp_mean.loc[\n (gdf_clim_norm_temp_mean[\"east\"] == grid_of_mean_temp_clim_norm_x[closest_x_index_mean]) &\n (gdf_clim_norm_temp_mean[\"north\"] == grid_of_mean_temp_clim_norm_y[closest_y_index_mean]),\n mean_month_of_int_format].values[0])\n \n \n gdf_of_interest[\"Mean Monthly Min Temp\"] = mean_monthly_min\n gdf_of_interest[\"Mean Monthly Max Temp\"] = mean_monthly_max\n gdf_of_interest[\"Mean Monthly Temp\"] = mean_monthly_temp\n \n return", "def moments(infile):\n \n version = 0.0\n #ruler75###########################################\n # ALL THE IMPORT STUFF \n import read_sex\n import string\n from flags import addflag, allflags,isflagon\n from momsource import momsource, momcat\n from time import time\n import os, sys\n import pyfits\n from CommonTools.loadascii import loadascii\n import momlib\n quitpath = momlib.quitpath\n secs_to_dhms = momlib.secs_to_dhms\n from Moments.latex import LaTeX\n from copy import copy\n from algorithms import get_stat\n # END IMPORT STUFF\n \n isthere = os.path.exists\n \n t1 = time()\n \n execpars = momlib.read_inputs(infile) # execution parameters\n\n #ruler75############################################\n \n # READOUT OF CATALOGUE\n \n father_cat = momcat(execpars) # initialize input catalog\n father_cat.read_catsex() # read input catalog\n \n for key in father_cat.flags:\n if father_cat.flags[key] == False :\n print \"\"\"SOME REQUESTED PARAMETER NOT IN CATALOGUE: %s\"\"\" %\\\n key\n stop()\n del key\n \n # READOUT OF SEX FILE\n # dictionary with keywords:values\n # father_sex = read_sex.read_filesex(execpars['father_sex_name'])\n # Not used by now.\n \n # Filtering of catalog using an external file which selects which objects\n # in the SEXTRACTOR catalog are to be analyzed.\n \n if father_cat.execpars['NUMBERs_f'][0] != 'None' :\n NUMBERs_f = father_cat.execpars['NUMBERs_f'][0]\n cols = ['NUMBER']\n formats = 'i'\n load = loadascii(NUMBERs_f,cols,formats,separator='blank')\n NUMBERs = load['NUMBER']\n indxs = []\n for number in NUMBERs:\n indxs.append(num.where(cat['NUMBER'] == number)[0])\n indxs = (num.array(indxs),)\n for key in father_cat:\n father_cat[key] = father_cat[key][indxs]\n \n elif 'DO' in father_cat and father_cat.execpars['useDo'][0] == 1:\n indxs = num.where(father_cat['DO'] == 1)\n NUMBERs = father_cat['NUMBER'][indxs]\n for key in father_cat:\n father_cat[key] = father_cat[key][indxs]\n \n if father_cat.execpars['delay'][0] != 0:\n delay = father_cat.execpars['delay'][0]\n for key in father_cat:\n father_cat[key] = father_cat[key][delay:]\n \n nobj_out = len(father_cat[father_cat.keys()[0]]) # number of objects to \n # analyze\n \n father_cat.initialize() # initialize output catalog\n \n # LOADING IMAGES:\n \n extension = 0 # Image extension. Not ready to handle higer extensions.\n \n # ARE IMAGES TOO BIG? 
THEN USE FITSCUT TO MAKE STAMPS.\n stampmode = execpars['cutmode'][0]\n \n if 'SEGIMAGE' not in father_cat and 'IMAGE' not in father_cat:\n \n if stampmode == 'pyfits':\n father_img = pyfits.getdata(execpars['father_img_name'],\\\n ext=extension).astype('Float32')\n father_seg = pyfits.getdata(execpars['father_seg_name'],\\\n ext=extension).astype('Int32')\n try: father_mask = pyfits.getdata(execpars['father_mask_name'],\\\n ext = extension).astype('Int32')\n except KeyError : pass\n father_img_dim = father_img.shape\n elif stampmode == 'fitscut' or stampmode == 'pyraf':\n father_img_name = execpars['father_img_name']\n father_seg_name = execpars['father_seg_name']\n try : \n father_mask_name = execpars['father_mask_name']\n except KeyError:\n pass\n father_img_dim = pyfits.getdata(father_img_name,\\\n ext=extension).shape\n \n # LOOP OVER SOURCES AND SOURCE OBJECT CREATION\n \n father_img_name_prev = ''\n \n try: Ndump = father_cat.execpars['Ndump'][0]\n except KeyError: Ndump = 0\n \n # MAIN LOOP\n \n for counter in range(nobj_out):\n \n if 'SEGIMAGE' in father_cat and 'IMAGE' in father_cat:\n \n if father_cat['IMAGE'][counter] != father_img_name_prev:\n father_img_name = father_cat['IMAGE'][counter]\n father_seg_name = father_cat['SEGIMAGE'][counter]\n try: father_mask_name = father_cat['MASK'][counter]\n except KeyError: father_mask_name = 'None'\n \n if stampmode == 'pyfits':\n father_img = pyfits.getdata(father_img_name,\\\n ext=extension).astype('Float32')\n father_img_dim = father_img.shape\n father_seg = pyfits.getdata(father_seg_name,\\\n ext=extension).astype('Int32')\n if isthere(father_mask_name):\n father_mask = pyfits.getdata(father_mask_name,\\\n ext=extension).astype('Int32')\n else : pass\n \n else:\n father_img_dim = pyfits.getdata(father_img_name,\\\n ext= extension).shape\n \n father_img_name_prev = father_img_name\n \n else : pass\n \n # TIME CONTROL\n \n t3 = time()\n print '\\nprocessing object %i of a total of %i\\n' % \\\n (counter+1,nobj_out)\n talready = t3-t1\n talready_fr = secs_to_dhms(talready)\n if talready_fr[0]>1:\n print '... %i days, %i hours, %i minutes, %f seconds since start' % \\\n talready_fr[0:]\n else:\n print '...%i hours, %i minutes, %f seconds since start' % \\\n talready_fr[1:]\n \n tahead = ((t3-t1)/(counter+1))* (nobj_out-counter+1)\n tahead_fr = secs_to_dhms(tahead)\n if tahead_fr[0] > 0 :\n print '%i days, %i hours, %.2f minutes to finish...' % \\\n tahead_fr[0:3]\n else:\n print '%i hours, %.2f minutes to finish...' 
% tahead_fr[1:3]\n \n source = momsource(execpars) # inherits from dict class.\n for col in father_cat : source[col] = father_cat[col][counter]\t\t \n del col\n\t\n if source.execpars['useMANBACK'][0] == 1:\n source['BACKGROUND'] = source.execpars['MANBACK'][0]\n \n # WINDOW DEFINITION\n \n source.getwindow(father_img_dim)\n \n # MAKING STAMPS\n extension = 0 # default by now\n \n\t\n if stampmode == 'pyfits':\n source.make_stamp(imgname=None,img=father_img,\\\n name='STAMP',extension=extension,mode=stampmode)\n source.make_stamp(imgname=None,img=father_seg,\\\n name='SEGSTAMP',extension=extension,mode=stampmode)\n try: source.make_stamp(imgname=None,img=father_mask,\\\n name='EXTMASK',extension=extension,mode=stampmode)\n except NameError : source['EXTMASK'] = None\n \n elif stampmode == 'fitscut' or stampmode=='pyraf':\n \n source.make_stamp(imgname=father_img_name,img=None,\\\n name='STAMP',extension=extension,mode=stampmode)\n source.make_stamp(imgname=father_seg_name,img=None,\\\n name='SEGSTAMP',extension=extension,mode=stampmode)\n if father_mask_name != 'None':\n source.make_stamp(imgname=father_mask_name,img=None,\\\n name='EXTMASK',extension=extension,mode=stampmode)\n else : source['EXTMASK'] = None\n \n # \"MASQUERADE...\"\n \n # 1 is masked, 0 is non masked\n \n source.make_mask(source['SEGSTAMP'],name='SEXMASK',\\\n mask_in=None,mode='nosky') # array.\n source.make_mask(source['SEGSTAMP'],name='SEXMASKOTHER',\\\n mask_in=None,mode='withsky') # array.\n\n\n if source['EXTMASK'] != None : \n source['MASK'] = momlib.mergemasks((source['SEXMASK'],\\\n source['EXTMASK']))\n source['MASKOTHER'] = momlib.mergemasks((source['SEXMASKOTHER'],\\\n source['EXTMASK']))\n source['SKYMASK'] = momlib.mergemasks((1-source['SEXMASK'],\\\n source['SEXMASKOTHER'],source['EXTMASK']))\n \n else : \n source['MASK'] = source['SEXMASK']\n source['MASKOTHER'] = source['SEXMASKOTHER']\n source['SKYMASK'] = momlib.mergemasks((1-source['SEXMASK'],\\\n source['SEXMASKOTHER']))\n \n source.execpars[\"version\"] = version\n if bool(source.execpars['makefits'][0]):\n source.wrap2mef()\n \n # If there's no pixel different from 0 in object, flag as \"BLANK\".\n nselected = len(num.where(source['MASK']==0)[0])\n \n nonblankprocent = 100.0 * len(num.where(((1-source['MASK']) * \\\n source['STAMP']) != 0.)[0])/ nselected\n print '\\nnonblankprocent = %.1f\\n' % nonblankprocent\n \n if nonblankprocent < 90.:\t \n source['flags'] = addflag(source['flags'],allflags['BLANK'])\n isblank = True\n print '\\n BLANK OBJECT!\\n'\t \n else: isblank = False\n \n # Do verbose Graphical Output?\n \n dograph = execpars['dograph'][0] == 1\n if dograph: \n source['figures'] = {}\n source['figcomms'] = {}\n \n # MANDATORY\n \n # mandatory: radial profile, petrosian radius, petrosian mask, \n # 1st & 2nd order moments\n # radial returns radial profile (within MASK)\n \n source['BOXY'] = 0 # Elliptical apertures always.\n \n # Measure sky sigma: THIS SAVES SOME TIME, AS IT IS ALREADY\n # REQUIRED BY SEVERAL TASKS.\n \n sky_sigma = source.execpars['sigma_sky'][0]\n \n source.getsky()\n\t\n if source['SKY_MEDIAN'] != None and \\\n source.execpars['useMANBACK'][0] == -1:\n print '\\nMeasured background : %.2e\\n' % source['SKY_MEDIAN']\n source['BACKGROUND'] = source['SKY_MEDIAN']\n \n if int(sky_sigma) == -1:\n sky_sigma = source['SKY_SIGMA']\n print 'sky_sigma = %.2e' % sky_sigma\n \n if sky_sigma != None : source.execpars['sigma_sky'] = [sky_sigma]\n del sky_sigma\n \n # RADIAL PROFILE\n tRAD_1 = time()\n if not isblank:\n \n if 
'M_RADIAL' in execpars['toexec']: \n if execpars['doFineRadial'][0]==0:\n source.radial_v1(dograph=dograph)\n else:\n source.radial_v3(dograph=dograph)\n \n tRAD_2 = time()\n print '%f seconds in making RADIAL profile' % (tRAD_2-tRAD_1,)\n \n # SAVE RADIAL PROFILE\n tRADSAVE_1 = time()\n if 'M_RADIAL' in execpars['toexec'] and \\\n bool(source.execpars['saveradial'][0]) :\n try: imgid = father_img_name\n except NameError: imgid = \\\n quitpath(source.execpars['father_img_name'])\n imgid = imgid[0:string.rfind(imgid,'.')]\n id = '%s' % source['name']\n radialfile = '%s_%s_RADIAL.txt' % (id,imgid)\n source.SaveRadial(radialfile)\n tRADSAVE_2 = time()\n print '%f seconds in saving RADIAL profile' % \\\n (tRADSAVE_2-tRADSAVE_1,)\n \n # PETROSIAN returns petrosian radius, intensity and \n # flux (within MASK)\n if 'M_PETRO' in execpars['toexec']: \n #source.petrosian()\n\t\tsource.petrosian2()\n \n # petromsk returns PETROSIAN MASK!\n if 'M_PETROMSK' in execpars['toexec']:\n source.petromsk()\n \n # Average Signal to Noise ratio\n if 'SNR' in execpars['toexec']:\n source.snr()\n \n # ellipticity parameters are computed before the mask may be\n # updated to Petrosian mask.\n # ellipse updates A, B, THETA, ELONGATION, ELLIP\n # (within MASK^SEGMAP)\n \n tELL_1 = time()\n if 'M_ELLIP' in execpars['toexec']:\n source.ellipse()\n tELL_2 = time()\n print '%f seconds in running ellipse' % (tELL_2-tELL_1,)\n \n tMOM_1 = time()\n if 'M_MOM' in execpars['toexec']:\n source.getmoments()\n tMOM_2 = time()\n print '%f seconds in running moments' % (tMOM_2-tMOM_1,)\n else:\n # RADIAL\n radiusflags = 0L\n radiusflags = addflag(radiusflags,allflags['NORADIAL'])\n source['M_RADIAL'] = {'radii':None,'cumulflx':None,\\\n 'intens':None,'npix':None,'npixout':None,\\\n 'radiusflags':radiusflags}\n # SAVE RADIAL\n source['radial_file'] = '0'\n # PETROSIAN\n source['R_PETRO'] = -99.0 ; source['I_PETRO'] = -99.0\n source['F_PETRO'] = -99.0\n # PETROMSK\n source['M_PETROMSK'] = None\n # SNR\n source['SNR'] = -99.\n # ELLIPSE\n source['M_A'] = -99.0 ; source['M_B'] = -99.0 \n source['M_THETA'] = -99.0 ; source['M_ELONG'] = -99.0 \n source['M_ELLIP'] = -99.0\n # Moments\n source['M_X'] = -99.0 ; source['M_Y'] = -99.0\n source['M_X2'] = -99.0 ; source['M_Y2'] = -99.0\n source['M_XY'] = -99.0\n \n # END MANDATORY\n \n # Use of petrosian mask\n # \n if source.execpars['usepetro'][0] == 1 and \\\n source['M_PETROMSK'] != None:\n if source['EXTMASK'] != None : \n bunch_of_masks = (source['M_PETROMSK'],\\\n source['SEXMASKOTHER'],source['EXTMASK'])\n else : \n bunch_of_masks = (source['M_PETROMSK'],source['SEXMASKOTHER'])\n \n source['MASK'] = momlib.mergemasks(bunch_of_masks)\n\t source['flags'] = addflag(source['flags'],allflags['USEPETROMSK'])\n \n # WARNING: 'THE PETROSIAN MASK MAY BE UNNOTICEDLY\n # TRUNCATED BY THE WINDOW!!'\n \n # OPTIONAL PARAMETERS\n \n if not isblank:\n \n # 'Basics' gets area, average intensity and total flux of object \n # within MASK.\n \n if 'BASICS' in execpars['toexec']:\n source.Basics()\n \n # SECOND ORDER MOMENT (ALTERNATIVE TAKE)\n if 'M2' in execpars['toexec']:\n source.M2()\n \n # RADII WHICH CONTAIN SEVERAL FLUX RATIOS\n \n if 'RADII' in execpars['toexec']:\n #source.Radii()\n\t\tsource.Radii2()\n \n # FLUX INSIDE A GIVEN RADIUS (APFLXRADIUS)\n \n if 'APFLX' in execpars['toexec'] and 'APFLXRADIUS' in source:\n source.ApFlx()\n \n # COORDINATES OF PEAK EMISSION\n \n # peak updates peak center (within MASK^SEGMAP)\n if 'M_PEAK' in execpars['toexec']: \n source.peak() # Minimum boxwidth = 3 
pix\n \n # GINI\n tG_1 = time()\n if 'M_GINI' in execpars['toexec'] : \n source.gini(dograph)\n tG_2 = time()\n print '%f seconds in running Gini' % (tG_2-tG_1,)\n \n # ASYMMETRY\n \n tA_1 = time()\n if 'ASYM' in execpars['toexec'] : \n source.asymmetry(dograph)\n tA_2 = time()\n print '%f seconds in running asymmetry' % (tA_2-tA_1,)\n \n # ANGULAR CONTRAST\n \n tAC_1 = time()\n if 'AC' in execpars['toexec']:\n source.AC(dograph)\n tAC_2 = time()\n print '%f seconds in running AC' % (tAC_2-tAC_1,)\n \n # CONCENTRATION\n \n tC_1 = time()\n if 'CONCENT' in execpars['toexec']:\n source.concent(dograph)\n tC_2 = time()\n print '%f seconds in running concent' % (tC_2-tC_1,)\n \n # FIND PEAKS (TO BE DROP SOON...)\n \n tNP_1 = time()\n if 'NPEAKS' in execpars['toexec']:\n source.FindPeaksII(dograph) # ON TESTS\n tNP_2 = time()\n print '%f seconds in running FindPeaks' % (tNP_2 - tNP_1,)\n \n # CLUMPS STATISTICS\n \n tNC_1 = time()\n if 'NCLUMPS' in execpars['toexec']:\n source.FindClumps(dograph) # ON TESTS\n tNC_2 = time()\n print '%f seconds in running FindClumps' % \\\n (tNC_2-tNC_1,)\n \n # CLUMPINESS\n \n tCL_1 = time()\n if 'CLUMPY' in execpars['toexec'] and source['R_PETRO'] != -99:\n source.clumpy(dograph) # Minimum boxwidth = 3 pix\n else:\n source['M_S'] = -99.0 ; source['M_S_SKY'] = -99.0\n tCL_2 = time()\n print '%f seconds in running clumpy' % (tCL_2 - tCL_1,)\n \n # AXIS ASYMETRY (MAJOR AXIS)\n \n tMAXAXIS_1 = time()\n if 'MAJOR_SIM' in execpars['toexec']:\n source.axis_asymmetry(axis='major',dograph=dograph)\n tMAXAXIS_2 = time()\n print '%f seconds in MAJOR_SIM' % (tMAXAXIS_2-tMAXAXIS_1,)\n \n # AXIS ASYMMETRY (MINOR AXIS)\n \n tMINAXIS_1 = time()\n if 'MINOR_SIM' in execpars['toexec']:\n source.axis_asymmetry(axis='minor',dograph=dograph)\n tMINAXIS_2 = time()\n print '%f seconds in running MINOR_SIM' % \\\n (tMINAXIS_2-tMINAXIS_1,)\n \n # M20\n \n tM20_1 = time()\n if 'M20' in execpars['toexec'] : \n if execpars['M20mode'][0] == 'Lotz':\n source.M20Lotz(dograph)\n elif execpars['M20mode'][0] == 'Azzo':\n source.M20Azzo(dograph)\n tM20_2 = time() \n print '%f seconds in running M20 (%s)' % (tM20_2-tM20_1,\\\n execpars['M20mode'][0])\n \n # EXCENTRICITY\n \n tE_1 = time()\n if 'EXCENTRICITY' in execpars['toexec']:\n source.Excentricity(dograph)\n tE_2 = time()\n print '%f seconds in running Excentricity' % (tE_2-tE_1,)\n \n # FILLING FACTOR\n \n tFF_1 = time()\n if 'FFACTOR' in execpars['toexec']:\n source.FFactor(dograph)\n tFF_2 = time()\n print '%f seconds in running FFactor' % (tFF_2-tFF_1)\n \n # VISITORS\n ttr_1 = time()\n if 'TRUNC' in execpars['toexec']:\n scale = 0.03\n zeroT = 24.84315\n source.trunc(scale,zeroT,dograph)\n ttr_2 = time()\n print '%f seconds in running trunc' % (ttr_2-ttr_1)\n \n else : \n # BASICS\n source['M_NPIX'] = -99 ; source['M_FLUX'] = -99.\n source['M_AVINT'] = -99.\n # M2\n source['M_M2'] = -99.\n # RADII\n source['M_R20'] = -99. ; source['M_R50'] = -99. ; \n source['M_R80'] = -99. ;\n # APFLX\n source['M_APFLX'] = -99.\n # PEAK\n source['M_XPEAK'] = -99. ; source['M_YPEAK'] = -99.\n # GINI\n source['M_GINI'] = -99.\n # ASYMMETRY\n source['M_AS_X'] = -99. ; source['M_AS_Y'] = -99.\n source['M_AS'] = -99. ; source['M_AS_SKY'] = -99.\n # AC\n source['MAC8'] = -99. ; source['MAC8M'] = -99.\n source['MAC4'] = -99.\n # CONCENT\n source['M_C'] = -99.0\n # NPEAKS\n source['M_NPEAKS'] = -99\n # NCLUMPS\n source['M_NUM_CL'] = -99 \n source['M_MAX_CL'] = -99. ; source['M_MIN_CL'] = -99.\n source['M_ACC_CL'] = -99. 
; source['M_FAR_CL'] = -99.\n # CLUMPY\n source['M_S'] = -99. ; source['M_S_SKY'] = -99. \n # MAJOR_SIM\n source['M_AXS_MAJ'] = -99. ; source['M_AXS_SKY_MAJ'] = -99.\n # MINOR_SIM\n source['M_AXS_MIN'] = -99. ; source['M_AXS_SKY_MIN'] = -99.\n # getmoments\n source['M_X'] = -99. ; source['M_Y'] = -99.\n source['M_X2'] = -99. ; source['M_Y2'] = -99. ; source['M_XY'] = -99.\n # M20\n source['M20'] = -99.\n # EXCENTRICITY\n source['M_E'] = -99.\n # FFACTOR\n source['M_FF'] = -99.\n \n # SOME OTHER OPTIONAL GRAPHIC OUTPUTS\n \n if dograph:\n source.stamp_graph('STAMP','stamp')\n source.stamp_graph('MASK','mask')\n if 'SKYSTAMP' in source : \n source.stamp_graph('SKYSTAMP','sky')\n source.petropeakcenter_graph()\n \n pdfid = source._getGraphId()\n latexfile = '%s.tex' % pdfid\n pdffile = '%s.pdf' % pdfid\n psfile = '%s.ps' % pdfid\n latex = LaTeX()\n figures = copy(source['figures'])\n figcomms = copy(source['figcomms'])\n header = '%s' % source['name']\n latex.DoBody(header,figures,figcomms)\n latex.Write(latexfile)\n latex.Compile(latexfile,cleanafter=True,\\\n figures=copy(source['figures']))\n latex.Ps2Pdf(psfile,pdffile,cleanafter=True)\n \n source['PDF'] = pdffile\n \n # PACKAGING OF REMAINING DATA TO THE OUTPUT OBJECT\n for key in execpars['to_output']: \n try: \n father_cat[key][counter] = source[key]\n except : stop()\n \n del source # free memory\n \n #sys.exit()\n t4 = time()\n lapsus = t4 - t3\n print '%i seconds in analyzing 1 object\\n\\n\\n' % lapsus\n \n \n # DUMPING OUTPUT to a FILE\n # it writes outputs to a file in sextractor fashion.\n # outfile_name = dumpcat(outfile_name,output,to_output)\n if (Ndump >=1) and (counter % Ndump == 0):\n if isthere(execpars['outfile_name']):\n os.system('rm %s' % execpars['outfile_name'])\n father_cat.dumpcat()\n \n if Ndump > 1:\n if isthere(execpars['outfile_name']):\n os.system('rm %s' % execpars['outfile_name'])\n \n father_cat.dumpcat()\n \n t2 = time()\n \n lapsus = t2 - t1\n \n lapsus_fr = secs_to_dhms(lapsus)\n \n if lapsus_fr[0]>0.:\n print \"\"\"\\n\\n\\n'Only' %i days, %i hours, %i minutes, %f seconds \n in analyzing %i objects\\n\\n\\n\"\"\" % \\\n (lapsus_fr[0],lapsus_fr[1],lapsus_fr[2],lapsus_fr[3],nobj_out)\n else: \n print \"\"\"'Only' %i hours, %i minutes, %f seconds in analyzing \n %i objects\\n\\n\\n\"\"\" % (lapsus_fr[1],lapsus_fr[2],lapsus_fr[3],nobj_out)\n \n return None", "def centroids(self):\n return self.mdm_.covmeans_", "def displayContours(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"contour\") == \"1\":\n needleNode = slicer.mrmlScene.GetNodeByID(modelNode.GetAttribute(\"needleID\"))\n if needleNode != None:\n if needleNode.GetDisplayVisibility()==1:\n modelNode.SetDisplayVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked)-1))\n d = modelNode.GetDisplayNode()\n d.SetSliceIntersectionVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked)-1))", "def drawShapes(contours, realImg, minArea=500, name=False):\n\n # Getting Shape of Real Image\n realH, realW, _ = realImg.shape\n\n thickness = realW // 275 # Setting Thickness\n fontScale = realH / 1000 # Setting fontScale\n\n if not contours:\n print(\"No Contours Found\")\n for cnt in contours:\n cntArea = cv2.contourArea(cnt) # Getting Area\n # i = -1\n # i += 1\n # color = (0, 255, 0)\n\n if cntArea < minArea:\n continue\n\n # Drawing contours\n # cv2.drawContours(realImg, [cnt], i, color, 3)\n\n # 
Finding Perimeters of each cnt\n peri = cv2.arcLength(cnt, True) # True for Closed Shapes\n approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)\n\n # Getting x, y co-ordinates and width, height of each cnt\n x, y, w, h = cv2.boundingRect(approx)\n\n # Making Black Rectangle around each contour\n cv2.rectangle(realImg,\n (x - thickness, y - thickness),\n (x + w + thickness, y + h + thickness),\n (0, 0, 0), thickness)\n\n # Making White Rectangle around each contour\n cv2.rectangle(realImg, (x, y), (x + w, y + h),\n (255, 255, 255), thickness // 2)\n\n if not name:\n continue\n\n corners = len(approx) # Counting Corners\n objType = None\n\n if corners == 3:\n objType = \"Triangle\"\n\n elif corners == 4:\n aspRatio = w / float(h)\n if aspRatio > 0.9 and aspRatio < 1.1:\n objType = \"Square\"\n else:\n objType = \"Rectangle\"\n\n elif corners == 5:\n objType = \"Pentagon\"\n\n elif corners == 6:\n objType = \"Hexagon\"\n\n elif corners > 7:\n if detectObject.isCircle(cnt, realImg):\n objType = \"Circle\"\n\n if objType is None:\n objType = \"Not Found\"\n\n if thickness == 1:\n thickness = 2\n\n # Setting FontFace\n fontFace = cv2.FONT_HERSHEY_DUPLEX\n\n # Getting Shape of Image\n h2, _, _ = realImg.shape\n\n # If Image Height is too Small, increase fontScale\n if h2 < 300:\n fontScale += 0.04\n elif h2 < 400:\n fontScale += 0.02\n\n textBgHeight = int(fontScale * 40)\n k = 0\n \n if h2 > 500:\n k = textBgHeight // 3\n\n # Copying the contents where we want to write text\n sub_img = realImg[y+h: y+h+textBgHeight+k, x:x+w]\n\n # Create a new black image with same shape\n rect = np.zeros(sub_img.shape, dtype=np.uint8)\n\n # Creating a new transparent image from\n # `copied image(sub_img)` and `black image`\n res = cv2.addWeighted(sub_img, .25, rect, .5, 1.0)\n\n # Replacing the part of realImg\n realImg[y+h: y+h+textBgHeight+k, x: x+w] = res\n\n # Putting White Text on Image\n cv2.putText(realImg, objType, (x, y + h + textBgHeight - 5),\n fontFace, fontScale, (255, 255, 255), thickness * 2)\n\n # # Putting Black Text on Image\n cv2.putText(realImg, objType, (x, y + h + textBgHeight - 5),\n fontFace, fontScale, (0, 0, 0), thickness // 2)", "def extract(self):\r\n # image import\r\n if self.settings.show_extraction_headings:\r\n print(\"importing image\")\r\n img_orig, img_comp, img_disp, img_blank_orig, img_blank_comp, img_blank_disp = imgCapture(self.img_orig, self.settings)\r\n if self.settings.show_basic_extraction_graphics:\r\n imshow(imageResize(img_comp, height=self.settings.disp_height), self.settings.env)\r\n\r\n # contour finder\r\n if self.settings.show_extraction_headings:\r\n print(\"finding contours\")\r\n piece_contours, img_mask_bgr, img_mask, img_masked = contourFinder(img_comp, img_blank_comp, self.settings)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_mask, height=self.settings.disp_height), self.settings.env)\r\n print(\"Number of Puzzle Pieces:\", len(piece_contours))\r\n imshow(imageResize(img_masked, height=self.settings.disp_height), self.settings.env)\r\n\r\n # clearance radius\r\n if self.settings.show_extraction_headings:\r\n print(\"detecting clearance radii\")\r\n img_circles, circle_centers, radius_max = clearanceRadius(piece_contours, img_mask_bgr, self.settings)\r\n if self.settings.show_extraction_text:\r\n print(\"Max Clearance Radius:\", radius_max)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_circles, height=self.settings.disp_height), self.settings.env)\r\n\r\n # hull creator\r\n if 
self.settings.show_extraction_headings:\r\n print(\"creating hulls\")\r\n img_hull_mask_bgr, hull, hull_points = hullCreator(piece_contours, img_mask_bgr, self.settings)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_hull_mask_bgr, height=self.settings.disp_height), self.settings.env)\r\n\r\n # convexity\r\n if self.settings.show_extraction_headings:\r\n print(\"detecting convexity defects\")\r\n img_defects, defects_f = convexity(piece_contours, hull, img_mask_bgr, self.settings)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_defects, height=self.settings.disp_height), self.settings.env)\r\n\r\n # corner finder\r\n if self.settings.show_extraction_headings:\r\n print(\"finding corners\")\r\n best_rectangles_sorted, best_rectangles_index, av_length, img_corners = cornerFinder(hull, hull_points, piece_contours,\r\n img_mask_bgr, defects_f, self.settings)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_corners, height=self.settings.disp_height), self.settings.env)\r\n\r\n # center finder\r\n if self.settings.show_extraction_headings:\r\n print(\"finding piece centers\")\r\n img_centers, centers = centerFinder(best_rectangles_sorted, img_corners, self.settings)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_centers, height=self.settings.disp_height), self.settings.env)\r\n\r\n # side separator\r\n if self.settings.show_extraction_headings:\r\n print(\"splitting piece contours into sides\")\r\n all_curves, side_lengths = sideSeparator(best_rectangles_index, piece_contours, img_blank_comp, self.settings)\r\n\r\n # corner & edge pieces\r\n if self.settings.show_extraction_headings:\r\n print(\"determining piece types\")\r\n img_piece_type, piece_type, edge_type, defects_by_side, interior, edges, corners = pieceType(defects_f, best_rectangles_index,\r\n piece_contours, img_blank_comp)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_piece_type, height=self.settings.disp_height), self.settings.env)\r\n\r\n puzzle_rows, puzzle_columns, corner_piece_count, edge_piece_count, standard_piece_count = puzzleSize(piece_type)\r\n\r\n h_spaced = 2 * radius_max * (puzzle_rows) + 2 * radius_max\r\n w_spaced = 2 * radius_max * (puzzle_columns) + 2 * radius_max\r\n\r\n img_blank_spaced = np.zeros([h_spaced, w_spaced, 3], dtype=np.uint8)\r\n\r\n corner_piece_count = len(corners)\r\n edge_piece_count = len(edges)\r\n standard_piece_count = len(interior)\r\n\r\n # locks searcher\r\n if self.settings.show_extraction_headings:\r\n print(\"detecting locks\")\r\n img_locks, outer_count, inner_count, edge_count = locksSearcher(defects_by_side, corner_piece_count, edge_piece_count,\r\n standard_piece_count, img_mask_bgr, all_curves, self.settings)\r\n if self.settings.show_extraction_text:\r\n print(\"Outer Locks:\", outer_count, \"Inner Locks:\", inner_count, \"Edges:\", edge_count)\r\n if self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_locks, height=self.settings.disp_height), self.settings.env)\r\n\r\n # aligner\r\n if self.settings.show_extraction_headings:\r\n print(\"aligning pieces\")\r\n grid_centers, angles, contours_rotated, all_segments_rotated, all_corners_rotated, processed_edge_types, img_align_segments\\\r\n = aligner(radius_max, best_rectangles_sorted, corners, edges, interior, piece_contours, img_blank_spaced, edge_type, centers,\r\n all_curves, puzzle_rows, puzzle_columns, self.settings)\r\n if 
self.settings.show_full_extraction_graphics:\r\n imshow(imageResize(img_align_segments, height=self.settings.disp_height), self.settings.env)\r\n\r\n # processed data\r\n if self.settings.show_extraction_headings:\r\n print(\"preparing data\")\r\n processed_pieces, img_processed_segments = processedData(all_segments_rotated, img_blank_spaced, processed_edge_types,\r\n all_corners_rotated, self.settings)\r\n processed_pieces = approxContours(processed_pieces, self.settings.e_contour_smoothing, state=False)\r\n if self.settings.show_basic_extraction_graphics:\r\n imshow(imageResize(img_processed_segments, height=self.settings.disp_height), self.settings.env)\r\n\r\n # bgr data\r\n if self.settings.show_extraction_headings:\r\n print(\"performing BGR data manipulation\")\r\n processed_bgr, img_processed_bgr = bgrData(img_blank_spaced, corners, edges, interior, angles, piece_contours, img_masked, centers,\r\n grid_centers, edge_type)\r\n if self.settings.show_basic_extraction_graphics:\r\n imshow(imageResize(img_processed_bgr, height=self.settings.disp_height), self.settings.env)\r\n\r\n # colour identification\r\n if self.settings.show_extraction_headings:\r\n print(\"identifying piece edge colours\")\r\n colour_contours, colour_contours_xy = colourIdentification(processed_pieces, img_processed_bgr, self.settings)\r\n\r\n self.piece_contours = piece_contours\r\n self.puzzle_rows = puzzle_rows\r\n self.puzzle_columns = puzzle_columns\r\n self.av_length = av_length\r\n self.piece_type = piece_type\r\n self.processed_edge_types = processed_edge_types\r\n self.processed_pieces = processed_pieces\r\n self.colour_contours = colour_contours\r\n self.colour_contours_xy = colour_contours_xy\r\n self.img_processed_bgr = img_processed_bgr\r\n self.grid_centers = grid_centers\r\n self.radius_max = radius_max\r\n self.contours_rotated = contours_rotated\r\n self.corners = corners\r\n self.edges = edges\r\n self.interior = interior\r\n self.img_blank_comp = img_blank_comp\r\n self.img_masked = img_masked\r\n self.centers = centers\r\n self.angles = angles\r\n self.edge_type = edge_type\r\n\r\n if (self.settings.show_extraction_headings):\r\n print(\"EXTRACTION COMPLETE\")", "def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. 
This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1", "def find_objects(img, threshold=.3):\n thresholded_img = np.uint8(img > threshold)\n _, markers = cv2.connectedComponents(thresholded_img)\n object_centers = []\n for ii in range(1, np.max(markers)):\n masked_img = mask_img(img, markers == ii)\n object_index = np.argmax(masked_img)\n object_center = np.unravel_index(object_index, img.shape)\n object_centers.append(object_center)\n return np.array(object_centers)" ]
[ "0.6727947", "0.6727947", "0.66949683", "0.65000355", "0.6481608", "0.63708127", "0.6356746", "0.6351715", "0.6243697", "0.61840034", "0.6168322", "0.6164202", "0.6142851", "0.6103647", "0.60443234", "0.6014186", "0.6013817", "0.5995825", "0.59587353", "0.59543926", "0.59508264", "0.5949832", "0.5949832", "0.5935131", "0.5892589", "0.5855636", "0.58379424", "0.58336055", "0.5813654", "0.57924664", "0.57883227", "0.5774147", "0.5769794", "0.5754367", "0.57505065", "0.57281256", "0.57094586", "0.56637037", "0.56573427", "0.5649474", "0.5605332", "0.5604968", "0.5600223", "0.55871934", "0.55788124", "0.5539501", "0.5486493", "0.5469563", "0.54658127", "0.54601425", "0.54546624", "0.54207766", "0.54002255", "0.53998196", "0.539439", "0.5385524", "0.53794765", "0.53781116", "0.53763515", "0.5360853", "0.5358239", "0.5355474", "0.53506964", "0.5346691", "0.5333158", "0.5332592", "0.53321767", "0.5326048", "0.53240126", "0.53196824", "0.53027016", "0.5295197", "0.5291987", "0.52914065", "0.5289894", "0.5287596", "0.52839243", "0.52671593", "0.5264891", "0.5252977", "0.525258", "0.52517945", "0.5248385", "0.5248385", "0.5246791", "0.52454066", "0.52442425", "0.5235038", "0.52229404", "0.52222234", "0.51895416", "0.5182953", "0.5179439", "0.5154277", "0.51515204", "0.51497126", "0.51462406", "0.5137505", "0.5133737", "0.51312417" ]
0.77274
0
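Several of the negatives in the record above (for example `sanitize_mask` and `locate_tracker`) reduce to the same OpenCV idiom: find the external contours, keep the largest, and derive the centroid from image moments as `m10/m00`, `m01/m00`. The following is a minimal, self-contained sketch of that idiom only; the function name, the binary-uint8-mask assumption, and the OpenCV 4 two-value `findContours` return are assumptions of this sketch, not taken from any of the dataset snippets.

```python
import cv2
import numpy as np

def largest_contour_centroid(mask):
    # `mask` is assumed to be a single-channel uint8 image with the
    # foreground set to 255 (an assumption, not from the dataset rows).
    # OpenCV 4 returns (contours, hierarchy); OpenCV 3 returned three values.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    c = max(contours, key=cv2.contourArea)  # keep only the largest blob
    M = cv2.moments(c)
    if M["m00"] == 0:  # zero-area contour: centroid is undefined
        return None
    # Centroid as integer pixel coordinates (x, y).
    return int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
```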
Calculates a contour perimeter or a curve length.
def __CalculatePerimeter(self, curve): return cv2.arcLength(curve, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)", "def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2", "def perimeter(self):\n return sum([s.length for s in self.segments])", "def perimeter(self):", "def __CalculateLength(self, curve):\r\n return cv2.arcLength(curve, True)", "def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def perimeter(self):\n\t\treturn 2 * (self.width + self.height)", "def perimeter(self):\n\t\treturn self.height * 4", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)", "def perimeter(self):\n return (\n self.side_1_length +\n self.side_2_length +\n self.side_3_length +\n self.side_4_length\n )", "def get_perimeter_formula(cls):\n pass", "def perimeter(a:float, b:float, c:float):\n return a + b + c", "def perimeter(self):\n return sum(seg.length for seg in self.segments) + \\\n sum([p.perimeter for p in self.subs])", "def perimeter(self):\n perimeter = (2 * self.__length) + (2 * self.__width)\n\n return perimeter", "def perimeter(self):\r\n return (2*self.width) + (2*self.height)", "def perimeter(points):\n return sum(get_distances(points))", "def perimeter(self):\n return 2 * (self.height + self.width)", "def perimeter(self):\n return sum(self._lengths)", "def perimeter(self):\n return sum(self._lengths)", "def edge_perimeter_length(c, stencil=nn_stencil):\n\n return np.sum(np.logical_not(c) * coordination(c, stencil=stencil))", "def PolyPerimeter(Coords):\n peri = 0.0\n for i in range(np.shape(Coords)[0]-1):\n # next point coord - current point coord\n peri = peri + ( (Coords[i+1,0] - Coords[i,0])**2 + (Coords[i+1,1] - Coords[i,1])**2 )**0.5\n\n return peri", "def perimeter_distance(self, p1, p2):\n\n p1_projection = self.outline.project(shgeo.Point(p1))\n p2_projection = self.outline.project(shgeo.Point(p2))\n\n distance = p2_projection - p1_projection\n\n if abs(distance) > self.outline_length / 2.0:\n # if we'd have to go more than halfway around, it's faster to go\n # the other way\n if distance < 0:\n return distance + self.outline_length\n elif distance > 0:\n return distance - self.outline_length\n else:\n # this ought not happen, but just for completeness, return 0 if\n # p1 and p0 are the same point\n return 0\n else:\n return distance", "def regular_polygon_area(perimeter, apothem):\n return (perimeter * apothem) / 2", "def get_rect_perimeter(length, width):\n length = (str)(length)\n width = (str)(width)\n if((length.isnumeric()) and (length.isnumeric())):\n length = (float)(length)\n width = (float)(width)\n perimeter = 2 * (length + width)\n else:\n perimeter = \"Invalid input, length and width must be numeric value\"\n return perimeter", "def get_corrected_arclength(pts,closed=False):\r\n \r\n l = len(pts)\r\n ptsDown2 = np.concatenate((pts[2:l],pts[0:2]))\r\n ptsDown1 = np.concatenate((pts[1:l],np.array([(pts[0][0],pts[0][1])])))\r\n ptsUp1 = np.concatenate((np.array([(pts[l-1][0],pts[l-1][1])]),pts[0:l-1]))\r\n ptsUp2 = np.concatenate((pts[l-2:l],pts[0:l-2]))\r\n summedPts = ptsDown2 + ptsDown1 + pts + pts + ptsUp1 + ptsUp2\r\n avePts = summedPts/5.0\r\n zoomAvePts = np.round(avePts)\r\n arcLength = cv2.arcLength(zoomAvePts.astype(int),closed)\r\n \r\n 
return arcLength", "def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)", "def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width", "def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))", "def area_of_circle(radius):\n return radius", "def circumference(self):\n raise NotImplementedError", "def area_circle(r):\n return (r ** 2) * math.pi", "def perimeter(self) -> ir.FloatingValue:\n return ops.GeoPerimeter(self).to_expr()", "def get_perimeter_formula(cls):\n dict_perimieter = {'circle':\"2πr\", 'square':'2a+2b', 'rectangle':'2a+2b',\n 'triangle':'3a',\"equilateral triangle\":'a+b+c',\n 'regular pentagon':\"5a\"}\n for k,v in dict_perimieter.items():\n if cls.__name__ == k:\n return v", "def calc_area(diameter):\n\n if diameter > 0:\n area = pi * (diameter/2) ** 2\n \n return area", "def circleArea(radius):\n return math.pi * radius * radius", "def lengthPerArea(I):\n perim = skimage.measure.perimeter(I.astype(int), 8)\n LA = perim / np.sum(I)\n print(\"LA (true count): {:.2%}\".format(LA))\n\n # lines probes, every 40 pixels\n probe = np.zeros(I.shape)\n probe[20:-20:40, 20:-20] = 1\n lines = I.astype(int) * probe\n\n # count number of intercepts\n h = np.array([[1, -1, 0]])\n points = scipy.signal.convolve2d(lines, h, mode='same')\n\n nb_lines = np.sum(lines)\n nb_points = np.sum(np.abs(points))\n PL = float(nb_points) / nb_lines\n print(\"pi/2*PL (evaluation): {:.2%}\".format(np.pi/2*PL))", "def test_triangle_get_perimeter(self):\n triangle = Triangle(0, 9, 10, 11)\n self.assertEqual(triangle.get_perimeter(), 30)", "def circle_area(circle):\n return pi * circle.radius * circle.radius", "def equivalentDiameter(cnt):\n\treturn np.sqrt(4 * (cv2.contourArea(cnt)) / np.pi)", "def get_diameter(self, method='volume'):\n\n if method == 'shape':\n pos = self.get_positions() - self.center\n d = 0.0\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(pos, n)\n d += r.max() - r.min()\n return d / len(self.surfaces)\n elif method == 'volume':\n V_cell = np.abs(np.linalg.det(self.lattice_basis))\n N_cell = len(self.atomic_basis)\n N = len(self)\n return 2.0 * (3.0 * N * V_cell / (4.0 * math.pi * N_cell)) ** (1.0/3.0)\n else:\n return 0.0", "def circle_area(radius):\n return math.pi * radius ** 2", "def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)", "def area(r):\n return np.pi * (r ** 2)", "def calculate_perimeter_diff(gt_perim, perf_perim):\n return abs(gt_perim - perf_perim) / gt_perim", "def __CalculateArea(self, contour):\r\n return cv2.contourArea(contour)", "def calculate_perimeter_ratio(gt_perimeter, perf_perimeter):\n return min(gt_perimeter, perf_perimeter) / max(gt_perimeter, perf_perimeter)", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def calculateDetectorArea(self):\n area = 0.0\n r = self.geoParam['CylinderLightGuideRadius']\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n area -= math.pow(r,2)\n r += self.geoParam['DetectorThickness']\n area += math.pow(r,2)\n r += self.geoParam['DetectorSpacing']\n return 
math.pi*area", "def perimeter_points(d,n,type = 'int'):\n rimpointsx = np.sin(np.linspace(0,2*np.pi,num=n,endpoint = False)) + 1\n rimpointsy = np.cos(np.linspace(0,2*np.pi,num=n,endpoint = False)) + 1\n rimpoints = (((d-1)/2))*np.array([rimpointsy,rimpointsx])\n if type == 'int':\n rimpoints = np.round(rimpoints)\n rimpoints = rimpoints.astype(int)\n return rimpoints", "def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)", "def length(self):\n return math.sqrt(self.x * self.x + self.y * self.y)", "def get_perimeter(self, radius: int = 1) -> set:\n return self.get_neighbourhood(radius) - self.get_neighbourhood(radius - 1)", "def island_perimeter(grid):\n c = 0\n length = len(grid) - 1\n width = len(grid[0]) - 1\n\n for i, r in enumerate(grid):\n for j, n in enumerate(r):\n if n == 1:\n if i == 0 or grid[i - 1][j] != 1:\n c += 1\n if j == 0 or grid[i][j - 1] != 1:\n c += 1\n if j == width or grid[i][j + 1] != 1:\n c += 1\n if i == length or grid[i + 1][j] != 1:\n c += 1\n return c", "def get_pupil_diameter(dlc):\r\n diameters = []\r\n # Get the x,y coordinates of the four pupil points\r\n top, bottom, left, right = [np.vstack((dlc[f'pupil_{point}_r_x'], dlc[f'pupil_{point}_r_y']))\r\n for point in ['top', 'bottom', 'left', 'right']]\r\n # First compute direct diameters\r\n diameters.append(np.linalg.norm(top - bottom, axis=0))\r\n diameters.append(np.linalg.norm(left - right, axis=0))\r\n\r\n # For non-crossing edges, estimate diameter via circle assumption\r\n for pair in [(top, left), (top, right), (bottom, left), (bottom, right)]:\r\n diameters.append(np.linalg.norm(pair[0] - pair[1], axis=0) * 2 ** 0.5)\r\n\r\n # Ignore all nan runtime warning\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\r\n return np.nanmedian(diameters, axis=0)", "def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def circle_area(radius):\n area = radius ** 2 * math.pi\n return area", "def cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area", "def circumference(self):\n return math.pi * self.radius * 2", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def circle_area(r):\n if r < 0:\n raise ValueError(\"Radius cannot be negative\")\n\n return pi*(r**2)", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def island_perimeter(grid):\n \"\"\"island_perimeter - perimeter of the island\n Parameter\n ---------\n grid:\n list\n Return\n ------\n int\n \"\"\"\n total = 0\n\n rows = len(grid)\n columns = len(grid[0])\n\n for row in range(rows):\n for col in range(columns):\n array = grid[row][col]\n if array == 1:\n total += 4\n if row != 0 and grid[row-1][col] == 1:\n total -= 1\n if col != 0 and grid[row][col-1] == 1:\n total -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n total -= 1\n if col + 1 != columns and grid[row][col + 1] == 1:\n total -= 1\n\n return total", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def _causal_measure(self, x, y):\r\n\t\tC_xy = self._cross_cumulant_4th(x, y)\r\n\t\tC_yx = self._cross_cumulant_4th(y, x)\r\n\t\tR = C_xy**2 - C_yx**2\r\n\t\treturn R", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - 
self.box[0]) / 2", "def area(self):\r\n return math.pi*(self.__radius**2)", "def calc_half_perimeter(self, source, sinks):\n deltax = 0\n deltay = 0\n assert self.cells[source].x in range(self.nx) and self.cells[source].y in range(self.ny)\n for sink in sinks:\n assert self.cells[sink].x in range(self.nx) and self.cells[sink].y in range(self.ny)\n dx = abs(self.cells[source].x - self.cells[sink].x)\n if dx > deltax:\n deltax = dx\n dy = abs(self.cells[source].y - self.cells[sink].y)\n if dy > deltay:\n deltay = dy\n return deltax + deltay", "def circumference(self):\n return self.width + self.height", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)", "def length(x):\n if ispoint(x):\n # return pointlength(x):\n return 0.0\n elif isline(x):\n return linelength(x)\n elif isarc(x):\n return arclength(x)\n elif ispoly(x):\n return polylength(x)\n elif isgeomlist(x):\n l = 0.0\n for g in x:\n l += length(g)\n return l\n else:\n raise ValueError(\"inappropriate type for length(): \".format(x))", "def area(self):\n semi_perimeter = self.perimeter() / 2\n area = semi_perimeter\n for l in self._lengths:\n area *= (semi_perimeter - l)\n return float('{:.2f}'.format(area**0.5))", "def getArea(self):\n return math.pi * self.radius ** 2", "def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)", "def get_interaction_length(self):\n return self.radius + 2.0 #in um", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def computeA(diameter):\n radius = diameter / 2.0\n return np.pi * (radius**2)", "def calculate_curvature(P):\n y = P[:,1].copy()\n x = P[:,0].copy()\n dx = np.gradient(x)\n yd = np.gradient(y, dx)\n ydd = np.gradient(yd, dx)\n return np.sum(ydd**2)", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def getDiameter(self):\n\n hdr = self.header\n if \"cd1_1\" in hdr:\n self.D = abs(hdr[\"cd1_1\"]) * hdr[\"naxis1\"]\n elif \"cdelt1\" in hdr:\n self.D = abs(hdr[\"cdelt1\"]) * hdr[\"naxis1\"]\n else:\n print(\"Warning: no coordinate information found in input header;\")\n print(\" pupil width assumed to be 6.5 meters\")\n self.D = 6.5", "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)", "def diameter(self):\n return 2 * self.radius", "def calc_length_distortion(self, x, y):\n\n # get the major axis of the used Earth ellipsoid\n ellaxis = Geodesic.WGS84.a\n\n # get the centre of the subgrid's projection\n fe = self.core.projection.osr_spref.GetProjParm('false_easting')\n fn = self.core.projection.osr_spref.GetProjParm('false_northing')\n\n # create the distances to the projection centre\n dists = np.sqrt((np.array(x) - fe)**2 + (np.array(y) - fn)**2)\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n #\n # is it just about the distance to the centre (c), and as are equally long\n # on the ellipsoid and on the 
projected plane (the core of of AEQD!)\n k = dists / ellaxis / np.sin(dists / ellaxis)\n\n return k", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)", "def get_dimensions_from_contour(img, cntr, kernel):\n\tmask = np.zeros_like(img) # mask will contain the fitted and adjusted ellipse of a single obstacle\n\tellipse = cv2.fitEllipse(cntr)\n\tx, y, obj_length, obj_height = cv2.boundingRect(cntr)\n\trect = cv2.minAreaRect(cntr)\n\n\tequi_diameter = obj_length # bounding rectangle gives a better approximation of diameter\n\n\tbox = cv2.boxPoints(rect)\n\tbox = np.int0(box)\n\tmask = cv2.ellipse(mask, ellipse, (255, 255, 255), -1) # draw the fitted ellipse\n\trows = mask.shape[0]\n\tcols = mask.shape[1]\n\tM = np.float32([[1, 0, 0], [0, 1, equi_diameter / 4]]) # shift mask down to match obstacle, not edge\n\tmask = cv2.warpAffine(mask, M, (cols, rows))\n\tmask = cv2.erode(mask, kernel, iterations=3) # erode the mask to remove background points\n\treturn mask, box, x, y, obj_length, obj_height", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def _compute_isometric_ratio(self, threshold):\n perimeter = 0 # dS\n volume = 0\n\n # A - inside set; B - outside set\n set_A = self.x >= threshold\n set_B = self.x < threshold\n \n for a_idx in np.where(set_A)[0]:\n # Compute inside set volume\n volume += self.degree[a_idx]\n\n for b_idx in np.where(set_B)[0]:\n # Compute boundaey betwen inside and outside sets\n perimeter += self.weights[a_idx, b_idx]\n \n # Compute isometric ratio\n iso_ratio = perimeter / volume\n return iso_ratio", "def island_perimeter(grid):\n w = len(grid[0])\n h = len(grid)\n perimeter = 0\n\n for i, col in enumerate(grid):\n for j, row in enumerate(col):\n if row == 1:\n perimeter += 4\n if grid[i][j-1] == 1:\n perimeter -= 1\n if grid[i][(j+1) % w] == 1:\n perimeter -= 1\n if grid[(i+1) % h][j] == 1:\n perimeter -= 1\n if grid[i-1][j] == 1:\n perimeter -= 1\n return perimeter", "def Lengths(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.element_type == \"line\":\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n else:\n # self.GetEdges()\n # coords = self.points[self.all_edges,:]\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n\n return lengths", "def ask_perimeter(self):\n\n print(\"controller - ask_perimeter!\")\n img_cv2_mask = self.pressure_img.mask\n if self.pressure_img.perimetre_done:\n self.view.popupmsg(\"El perímetre ja ha estat seleccionat\")\n else:\n self.pressure_img.roi_crop(img_cv2_mask, \"Perimeter\")", "def area(self):\n return self.radius*self.radius*math.pi", "def area_reg_polygon(sides: int, length: float) -> float:\r\n if not isinstance(sides, int) or sides < 3:\r\n raise ValueError(\r\n \"area_reg_polygon() only accepts integers greater than or \\\r\nequal to three as number of sides\"\r\n )\r\n elif length < 0:\r\n raise ValueError(\r\n \"area_reg_polygon() only accepts non-negative values as \\\r\nlength of a side\"\r\n )\r\n return (sides * length**2) / (4 * tan(pi / sides))\r\n return (sides * length**2) / (4 * tan(pi / sides))" ]
[ "0.721825", "0.69289505", "0.6759035", "0.6589213", "0.6564522", "0.65498984", "0.6483492", "0.6478432", "0.6431225", "0.6417622", "0.6385283", "0.6373984", "0.63636816", "0.636077", "0.6348714", "0.6287157", "0.62746215", "0.6269028", "0.6236131", "0.6204229", "0.6204229", "0.5961299", "0.59612936", "0.5935809", "0.58287364", "0.5769499", "0.5767257", "0.5755544", "0.5729385", "0.57146126", "0.5687516", "0.56741035", "0.5629137", "0.5625132", "0.5608847", "0.5572089", "0.5547673", "0.5542155", "0.55340725", "0.5525256", "0.54974407", "0.54343885", "0.5425712", "0.54012895", "0.5395551", "0.5362123", "0.5358603", "0.5351841", "0.5348603", "0.5335827", "0.530354", "0.5299339", "0.52891976", "0.52839553", "0.5282794", "0.52781767", "0.52777976", "0.52756387", "0.52756387", "0.52594626", "0.52568454", "0.52546865", "0.5250032", "0.5248012", "0.52429086", "0.52429086", "0.52423054", "0.5241915", "0.5237565", "0.52328205", "0.5220519", "0.52204543", "0.5217284", "0.5211429", "0.52111", "0.51967794", "0.5193463", "0.5189176", "0.517717", "0.51763594", "0.5172072", "0.51697236", "0.51695", "0.5158418", "0.5158418", "0.51565903", "0.5156008", "0.5155861", "0.51520646", "0.5148841", "0.51488274", "0.5148287", "0.51474833", "0.514516", "0.5141808", "0.5131304", "0.5128541", "0.5127326", "0.5121912", "0.51149625" ]
0.7875777
0
Calculate the rotated rectangle as a Box2D structure which contains the given contour
def __CalculateRotatedBox(self, contour):
    # Minimum-area (rotated) bounding rectangle: ((cx, cy), (w, h), angle)
    rectangle = cv2.minAreaRect(contour)
    # Four corner points of that Box2D structure
    box = cv2.boxPoints(rectangle)
    # Round the float corners to integer pixel coordinates
    return np.int0(box)
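A hedged aside, not part of the dataset record itself: a minimal, self-contained sketch of how a routine like the document above is typically used. The mask, the polygon coordinates, and the standalone `calculate_rotated_box` wrapper are illustrative assumptions; only the `cv2.minAreaRect` / `cv2.boxPoints` pipeline mirrors the document's calls.

import cv2
import numpy as np

def calculate_rotated_box(contour):
    # Same pipeline as the document's __CalculateRotatedBox.
    rectangle = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rectangle)
    # The document uses np.int0; np.intp is the non-deprecated equivalent.
    return box.astype(np.intp)

# Hypothetical binary mask containing a single tilted quadrilateral.
mask = np.zeros((200, 200), dtype=np.uint8)
pts = np.array([[40, 60], [150, 30], [170, 100], [60, 130]], dtype=np.int32)
cv2.fillPoly(mask, [pts], 255)

# OpenCV 4.x return signature: (contours, hierarchy).
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    box = calculate_rotated_box(cnt)
    cv2.drawContours(mask, [box], 0, 127, 2)  # outline the rotated box

The design choice worth noting: unlike `cv2.boundingRect`, which returns an axis-aligned box, `cv2.minAreaRect` fits the tightest box at any angle, which is why the corner points must be recovered explicitly via `cv2.boxPoints` before drawing.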
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_rotation(self, rectangle):\n if self._rotation == 90:\n return Rectangle(\n self._height - rectangle.y2,\n rectangle.x1,\n self._height - rectangle.y1,\n rectangle.x2,\n )\n if self._rotation == 180:\n return Rectangle(\n self._width - rectangle.x2,\n self._height - rectangle.y2,\n self._width - rectangle.x1,\n self._height - rectangle.y1,\n )\n if self._rotation == 270:\n return Rectangle(\n rectangle.y1,\n self._width - rectangle.x2,\n rectangle.y2,\n self._width - rectangle.x1,\n )\n return rectangle", "def determine_bounding_box_of_rotated_box(self, box, rotation_matrix):\n\n # top left, top right, bottom left, bottom right\n p1, p2, p3, p4 = box_points(box)\n\n # rotate all the points of the box\n tp1 = calc_rotate_point_with_rotation_matrix(p1, rotation_matrix)\n tp2 = calc_rotate_point_with_rotation_matrix(p2, rotation_matrix)\n tp3 = calc_rotate_point_with_rotation_matrix(p3, rotation_matrix)\n tp4 = calc_rotate_point_with_rotation_matrix(p4, rotation_matrix)\n\n # figure out which point has the furthest x distance, and the furthest y distance\n dx1 = abs(tp1[0] - tp4[0])\n dx2 = abs(tp2[0] - tp3[0])\n dy1 = abs(tp1[1] - tp4[1])\n dy2 = abs(tp2[1] - tp3[1])\n # the width and the height is the max distance between x and y\n w, h = max(dx1, dx2), max(dy1, dy2)\n\n # x and y is the min x, and min y among all points\n x = min(tp1[0], tp2[0], tp3[0], tp4[0])\n y = min(tp1[1], tp2[1], tp3[1], tp4[1])\n\n return (x, y, w, h)", "def minimum_rotated_rectangle(self): # -> BaseGeometry:\n ...", "def estimate_rotation(bounding_box):\n # x,y coord of topleft corner\n x,y,w,h = bounding_box\n rotation_arg = np.abs(1 - (h/float(w)))*2\n return rad_to_deg( np.arctan(rotation_arg) )", "def bbox_rotate(bbox: BoxInternalType, angle: float, method: str, rows: int, cols: int) -> BoxInternalType:\n x_min, y_min, x_max, y_max = bbox[:4]\n scale = cols / float(rows)\n if method == \"largest_box\":\n x = np.array([x_min, x_max, x_max, x_min]) - 0.5\n y = np.array([y_min, y_min, y_max, y_max]) - 0.5\n elif method == \"ellipse\":\n w = (x_max - x_min) / 2\n h = (y_max - y_min) / 2\n data = np.arange(0, 360, dtype=np.float32)\n x = w * np.sin(np.radians(data)) + (w + x_min - 0.5)\n y = h * np.cos(np.radians(data)) + (h + y_min - 0.5)\n else:\n raise ValueError(f\"Method {method} is not a valid rotation method.\")\n angle = np.deg2rad(angle)\n x_t = (np.cos(angle) * x * scale + np.sin(angle) * y) / scale\n y_t = -np.sin(angle) * x * scale + np.cos(angle) * y\n x_t = x_t + 0.5\n y_t = y_t + 0.5\n\n x_min, x_max = min(x_t), max(x_t)\n y_min, y_max = min(y_t), max(y_t)\n\n return x_min, y_min, x_max, y_max", "def rotate_box(corners, angle, cx, cy, h, w):\n # print(corners)\n corners = corners.reshape(-1, 2)\n corners = np.hstack((corners, np.ones((corners.shape[0], 1), dtype=type(corners[0][0]))))\n\n M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)\n\n cos = np.abs(M[0, 0])\n sin = np.abs(M[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n # adjust the rotation matrix to take into account translation\n M[0, 2] += (nW / 2) - cx\n M[1, 2] += (nH / 2) - cy\n # Prepare the vector to be transformed\n calculated = np.dot(M, corners.T).T\n\n calculated = calculated.reshape(-1, 8)\n\n return calculated", "def rotated_rect(w, h, angle):\n angle = math.radians(angle)\n quadrant = int(math.floor(angle / (math.pi / 2))) & 3\n sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\n alpha = (sign_alpha % math.pi + math.pi) % math.pi\n\n bb_w = w * math.cos(alpha) + h * 
math.sin(alpha)\n bb_h = w * math.sin(alpha) + h * math.cos(alpha)\n\n gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\n\n delta = math.pi - alpha - gamma\n\n length = h if (w < h) else w\n\n d = length * math.cos(alpha)\n a = d * math.sin(alpha) / math.sin(delta)\n\n y = a * math.cos(gamma)\n x = y * math.tan(gamma)\n\n return bb_w - 2 * x, bb_h - 2 * y", "def box_to_rect(box, color, linewidth=3):\n box = box.asnumpy()\n return plt.Rectangle(\n (box[0], box[1]), box[2] - box[0], box[3] - box[1],\n fill=False, edgecolor=color, linewidth=linewidth)", "def box_to_rect(box, color, linewidth=3):\r\n box = box.asnumpy()\r\n return plt.Rectangle(\r\n (box[0], box[1]), box[2]-box[0], box[3]-box[1],\r\n fill=False, edgecolor=color, linewidth=linewidth)", "def convertRect(rect):\n tmp_vet = np.zeros(4)\n \n tmp_vet[0] = rect[0][0]\n tmp_vet[1] = rect[0][1]\n tmp_vet[2] = rect[1][0]\n tmp_vet[3] = rect[1][1]\n \n return tmp_vet", "def rectangleRotation(a, b):\r\n\r\n line2 = (-1, sqrt(a**2 / 2))\r\n line4 = (-1, -sqrt(a**2 / 2))\r\n\r\n line1 = (1, sqrt(b**2 / 2))\r\n line3 = (1, -sqrt(b**2 / 2))\r\n\r\n tot = 0\r\n\r\n print(line2, line1)\r\n print(line3, line4)\r\n\r\n for xpts in range(-b * a, b * a):\r\n for ypts in range(-a * b, a * b):\r\n if (isunder(xpts, ypts, line1[0], line1[1]) and\r\n isunder(xpts, ypts, line2[0], line2[1]) and\r\n not isunder(xpts, ypts, line3[0], line3[1]) and\r\n not isunder(xpts, ypts, line4[0], line4[1])):\r\n tot += 1\r\n return tot", "def box_to_rect(box, color, linewidth=3):\r\n box = box.asnumpy()\r\n return plt.Rectangle(\r\n (box[0], box[1]), box[2] - box[0], box[3] - box[1],\r\n fill=False, edgecolor=color, linewidth=linewidth)", "def transformedRect(self, P):\n A = self.transformPointHeight(P, -1)\n B = self.transformPointHeight(P)\n C = self.transformPoint(P)\n D = self.transformPoint(P, -1)\n return A, B, C, D", "def box(self):\n r2 = self.radius\n res = [self.x - r2, self.y - r2, self.x + r2, self.y + r2]\n return res", "def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - 
(-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y8 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", 
\"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def getRect(self): # real signature unknown; restored from __doc__\r\n pass", "def rotate(image, rect, angle):\n new_image = pygame.transform.rotate(image, angle) # Rotate the original image without modifying it.\n rect = new_image.get_rect(center=rect.center) # Get a new rect with the center of the old rect.\n return new_image, rect", "def make_wander_box(self):\n x = int(self.location[0])\n y = int(self.location[1])\n box_list = []\n box_rects = []\n\n for i in range(x-3, x+4):\n box_list.append([i, y-3])\n box_list.append([i, y+3])\n\n for i in range(y-2, y+3):\n box_list.append([x-3, i])\n box_list.append([x+3, i])\n\n for box in box_list:\n left = box[0]*32\n top = box[1]*32\n box_rects.append(pg.Rect(left, top, 32, 32))\n\n return box_rects", "def _rectangle_corners(rectangle):\n corner_points = []\n for i1 in (.5, -.5):\n for i2 in (i1, -1 * i1):\n corner_points.append((rectangle['rectangle_center'][0] + i1 * rectangle['length_parallel'],\n rectangle['rectangle_center'][1] + i2 * rectangle['length_orthogonal']))\n\n return _rotate_points(rectangle['rectangle_center'], rectangle['unit_vector_angle'], corner_points)", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... 
is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def rectangular_old(m, n, len1=1.0, len2=1.0, origin = (0.0, 0.0)):\n\n from anuga.config import epsilon\n\n deltax = float(len1)/m\n deltay = float(len2)/n\n\n #Dictionary of vertex objects\n vertices = {}\n points = []\n\n for i in range(m+1):\n for j in range(n+1):\n vertices[i,j] = len(points)\n points.append([i*delta1 + origin[0], j*delta2 + origin[1]])\n\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n elements = []\n boundary = {}\n for i in range(m):\n for j in range(n):\n v1 = vertices[i,j+1]\n v2 = vertices[i,j]\n v3 = vertices[i+1,j+1]\n v4 = vertices[i+1,j]\n\n #Update boundary dictionary and create elements\n if i == m-1:\n boundary[(len(elements), 2)] = 'right'\n if j == 0:\n boundary[(len(elements), 1)] = 'bottom'\n elements.append([v4,v3,v2]) #Lower element\n\n if i == 0:\n boundary[(len(elements), 2)] = 'left'\n if j == n-1:\n boundary[(len(elements), 1)] = 'top'\n elements.append([v1,v2,v3]) #Upper element\n\n return points, elements, boundary", "def rotate(image, rect, angle):\n # Rotate the original image without modifying it.\n new_image = pg.transform.rotate(image, angle)\n # Get a new rect with the center of the old rect.\n rect = new_image.get_rect(center=rect.center)\n return new_image, rect", "def normalize_rect(rect):\n if rect[1][0] > rect[1][1]:\n # incoming rect can be a tuple so if swapping reassign the whole thing\n rect = (\n rect[0], # same center coordinates\n (rect[1][1], rect[1][0]), # swap height with width\n rect[2] + 90.0 if rect[2] < 0.0 else -90.0\n )\n return rect", "def rot_center(image,rect,angle):\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def rect_vertical_split(rect):\n\n left = rect.copy()\n left.width /= 2\n\n right = rect.copy()\n right.width = rect.width - left.width\n\n right.left = left.right\n return (left, right)", "def rectangle(xcenter, ycenter, width, height):\n x1, x2 = xcenter - width, xcenter + width\n y1, y2 = ycenter - height, ycenter + height\n return Shape([Point(x1, y1), Point(x1, y2), Point(x2, y2), Point(x2, y1)])", "def _decode_bbox(self, normalized_bbox):\n #apply the inverse of transformation\n y1,x1,y2,x2 = preprocess.apply_transformation(normalized_bbox,\n np.linalg.inv(self.transformation))\n\n w,h = self.image_size\n y1,x1,y2,x2 = y1*h,x1*w,y2*h,x2*w\n return vot.Rectangle(x1,y1,x2-x1,y2-y1)", "def afficher_rectangle(R):\n rectangle = R\n buttom_left = rectangle[0]\n S = rectangle[1]\n #print(\"rectangle = \", rectangle, \"et S = \", S)\n dx = abs(buttom_left[0] - S[0])\n dy = abs(buttom_left[1] - S[1])\n return([buttom_left[0], buttom_left[1], dx, dy])", "def find_square_box(box):\n width = box['bottom_right_x'] - box['top_left_x']\n height = box['bottom_right_y'] - box['top_left_y']\n if width <= height:\n offset = int((width - height) / 2)\n box['top_left_x'] = box['top_left_x'] - offset\n box['bottom_right_x'] = box['bottom_right_x'] + offset\n else:\n offset = int((height - width) / 2)\n box['top_left_y'] = box['top_left_y'] - offset\n box['bottom_right_y'] = box['bottom_right_y'] + offset\n return box", "def bbox_rot90(bbox: BoxInternalType, factor: int, rows: int, cols: int) -> 
BoxInternalType: # skipcq: PYL-W0613\n if factor not in {0, 1, 2, 3}:\n raise ValueError(\"Parameter n must be in set {0, 1, 2, 3}\")\n x_min, y_min, x_max, y_max = bbox[:4]\n if factor == 1:\n bbox = y_min, 1 - x_max, y_max, 1 - x_min\n elif factor == 2:\n bbox = 1 - x_max, 1 - y_max, 1 - x_min, 1 - y_min\n elif factor == 3:\n bbox = 1 - y_max, x_min, 1 - y_min, x_max\n return bbox", "def rectangular(m, n, len1=1.0, len2=1.0, origin = (0.0, 0.0)):\n\n from anuga.config import epsilon\n\n delta1 = float(len1)/m\n delta2 = float(len2)/n\n\n #Calculate number of points\n Np = (m+1)*(n+1)\n\n class Index(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return j+i*(self.n+1)\n\n\n index = Index(n,m)\n\n points = num.zeros((Np, 2), float)\n\n for i in range(m+1):\n for j in range(n+1):\n\n points[index(i,j),:] = [i*delta1 + origin[0], j*delta2 + origin[1]]\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n #Calculate number of triangles\n Nt = 2*m*n\n\n\n elements = num.zeros((Nt, 3), int)\n boundary = {}\n nt = -1\n for i in range(m):\n for j in range(n):\n nt = nt + 1\n i1 = index(i,j+1)\n i2 = index(i,j)\n i3 = index(i+1,j+1)\n i4 = index(i+1,j)\n\n\n #Update boundary dictionary and create elements\n if i == m-1:\n boundary[nt, 2] = 'right'\n if j == 0:\n boundary[nt, 1] = 'bottom'\n elements[nt,:] = [i4,i3,i2] #Lower element\n nt = nt + 1\n\n if i == 0:\n boundary[nt, 2] = 'left'\n if j == n-1:\n boundary[nt, 1] = 'top'\n elements[nt,:] = [i1,i2,i3] #Upper element\n\n return points, elements, boundary", "def crop_rotated_contour(self, plate, rect):\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n W = rect[1][0]\n H = rect[1][1]\n \n Xs = [i[0] for i in box]\n Ys = [i[1] for i in box]\n x1 = min(Xs)\n x2 = max(Xs)\n y1 = min(Ys)\n y2 = max(Ys)\n \n angle = rect[2]\n if angle < (-45):\n angle += 90\n \n # Center of rectangle in source image\n center = ((x1 + x2)/2,(y1 + y2)/2)\n\n # Size of the upright rectangle bounding the rotated rectangle\n size = (x2-x1, y2-y1)\n M = cv2.getRotationMatrix2D((size[0]/2, size[1]/2), angle, 1.0)\n\n # Cropped upright rectangle\n cropped = cv2.getRectSubPix(plate, size, center)\n cropped = cv2.warpAffine(cropped, M, size)\n croppedW = H if H > W else W\n croppedH = H if H < W else W\n\n # Final cropped & rotated rectangle\n croppedRotated = cv2.getRectSubPix(cropped, (int(croppedW), int(croppedH)), (size[0]/2, size[1]/2))\n return croppedRotated", "def rectangle(width, height):\r\n halfwidth = width*0.5\r\n halfheight = height*0.5\r\n v00 = (-halfwidth, -halfheight, 0, 1)\r\n v01 = (-halfwidth, halfheight, 0, 1)\r\n v10 = (halfwidth, -halfheight, 0, 1)\r\n v11 = (halfwidth, halfheight, 0, 1)\r\n n = (0,0,1,0)\r\n t = (1,0,0,0)\r\n b = (0,1,0,0)\r\n\r\n verts = N.array(\r\n v00 + n + t + b + (0,0) +\r\n v11 + n + t + b + (1,1) +\r\n v01 + n + t + b + (0,1) +\r\n v00 + n + t + b + (0,0) +\r\n v10 + n + t + b + (1,0) +\r\n v11 + n + t + b + (1,1) ,\r\n dtype=N.float32)\r\n\r\n indices = N.array((0,1,2,\r\n 3,4,5), dtype=N.uint32)\r\n\r\n return (N.array(verts, dtype=N.float32),\r\n N.array(indices,dtype=N.uint32))", "def get_rect (self) :\n return self.rect.copy()", "def Transformed(self, *args):\n return _Bnd.Bnd_Box2d_Transformed(self, *args)", "def box2poly(box):\n return Polytope.from_box(box)", "def compute_ray(self, box):\n if box[0, -1] > 0:\n warnings.warn('Box should have negative Z values.')\n\n size_x = np.linalg.norm(box[5] - box[1])\n size_y = np.linalg.norm(box[3] - 
box[1])\n size_z = np.linalg.norm(box[2] - box[1])\n size = np.asarray([size_x, size_y, size_z])\n box_o = Box.UNIT_BOX * size\n box_oh = np.ones((4, 9))\n box_oh[:3] = np.transpose(box_o)\n\n box_ch = np.ones((4, 9))\n box_ch[:3] = np.transpose(box)\n box_cht = np.transpose(box_ch)\n\n box_oct = np.matmul(box_oh, box_cht)\n box_cct_inv = np.linalg.inv(np.matmul(box_ch, box_cht))\n transform = np.matmul(box_oct, box_cct_inv)\n return transform[:3, 3:].reshape((3))", "def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]", "def rectangledict(self):\n return rectangledict(self.rectangles)", "def rotate(self):\r\n # Rotate the image.\r\n self.image = pg.transform.rotozoom(self.orig_image, -self.angle, 1)\r\n # Rotate the offset vector.\r\n offset_rotated = self.offset.rotate(self.angle)\r\n print(\"offset_rotated:\", offset_rotated)\r\n # Create a new rect with the center of the sprite + the offset.\r\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)", "def roi_rect(self):\n return (\n self.roi_x_offset, self.roi_y_offset,\n self.roi_x_size, self.roi_y_size,\n )", "def __bbox2square(self, bboxes):\n height = bboxes[:, 2] - bboxes[:, 0] + 1\n width = bboxes[:, 3] - bboxes[:, 1] + 1\n side = np.maximum(width, height).T\n bboxes[:, 0] += (height - side) * 0.5\n bboxes[:, 1] += (width - side) * 0.5\n bboxes[:, 2] = np.around(bboxes[:, 0] + side - 1);\n bboxes[:, 3] = np.around(bboxes[:, 1] + side - 1);\n bboxes[:, :2] = np.around(bboxes[:, :2])\n return bboxes", "def extract_bounding_boxes(self, scene):\n objs = scene[\"objects\"]\n rotation = scene[\"directions\"][\"right\"]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n\n for i, obj in enumerate(objs):\n [x, y, z] = obj[\"pixel_coords\"]\n\n [x1, y1, z1] = obj[\"3d_coords\"]\n\n cos_theta, sin_theta, _ = rotation\n\n x1 = x1 * cos_theta + y1 * sin_theta\n y1 = x1 * -sin_theta + y1 * cos_theta\n\n height_d = 6.9 * z1 * (15 - y1) / 2.0\n height_u = height_d\n width_l = height_d\n width_r = height_d\n\n if obj[\"shape\"] == \"cylinder\":\n d = 9.4 + y1\n h = 6.4\n s = z1\n\n height_u *= (s * (h / d + 1)) \\\n / ((s * (h / d + 1)) - (s * (h - s) / d))\n height_d = height_u * (h - s + d) / (h + s + d)\n\n width_l *= 11 / (10 + y1)\n width_r = width_l\n\n if obj[\"shape\"] == \"cube\":\n height_u *= 1.3 * 10 / (10 + y1)\n height_d = height_u\n width_l = height_u\n width_r = height_u\n\n ymin.append((y - height_d) / 320.0)\n ymax.append((y + height_u) / 320.0)\n xmin.append((x - width_l) / 480.0)\n xmax.append((x + width_r) / 480.0)\n\n return xmin, ymin, xmax, ymax", "def _compute_affine_cropper(bounding_box: BB, width_raw: int, height_raw: int) -> torch.Tensor: \n kx = (-1.0 + 2 * bounding_box.bx / width_raw).reshape(-1, 1)\n ky = (-1.0 + 2 * bounding_box.by / height_raw).reshape(-1, 1)\n sx = (bounding_box.bw / width_raw).reshape(-1, 1)\n sy = (bounding_box.bh / height_raw).reshape(-1, 1)\n zero = torch.zeros_like(kx)\n\n return torch.cat((sy, zero, ky, zero, sx, kx), dim=-1).reshape(-1, 2, 3)", "def 
largest_rotated_rect(w, h, angle):\r\n\r\n quadrant = int(math.floor(angle / (math.pi / 2))) & 3\r\n sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\r\n alpha = (sign_alpha % math.pi + math.pi) % math.pi\r\n\r\n bb_w = w * math.cos(alpha) + h * math.sin(alpha)\r\n bb_h = w * math.sin(alpha) + h * math.cos(alpha)\r\n\r\n gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\r\n\r\n delta = math.pi - alpha - gamma\r\n\r\n length = h if (w < h) else w\r\n\r\n d = length * math.cos(alpha)\r\n a = d * math.sin(alpha) / math.sin(delta)\r\n\r\n y = a * math.cos(gamma)\r\n x = y * math.tan(gamma)\r\n\r\n return (\r\n bb_w - 2 * x,\r\n bb_h - 2 * y\r\n )", "def rot_center(image, rect, angle):\n\trot_image = pygame.transform.rotate(image, angle)\n\trot_rect = rot_image.get_rect(center=rect.center)\n\treturn rot_image,rot_rect", "def _transform_coordinates(rectangle, Q=np.matrix(((1, 1), (-1, 1)))):\n return tuple((rectangle[0]*Q).A1), tuple((rectangle[1]*Q).A1)", "def convert(size, box):\n # TODO rewrite box to be [TL, BR] coordinates\n #pdb.set_trace()\n dw = 1./size[0]\n dh = 1./size[1]\n x = (box[0] + box[1])/2.0\n y = (box[2] + box[3])/2.0\n w = box[1] - box[0]\n h = box[3] - box[2]\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n return (x,y,w,h)", "def largest_rotated_rect(w, h, angle):\n\n quadrant = int(math.floor(angle / (math.pi / 2))) & 3\n sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\n alpha = (sign_alpha % math.pi + math.pi) % math.pi\n\n bb_w = w * math.cos(alpha) + h * math.sin(alpha)\n bb_h = w * math.sin(alpha) + h * math.cos(alpha)\n\n gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\n\n delta = math.pi - alpha - gamma\n\n length = h if (w < h) else w\n\n d = length * math.cos(alpha)\n a = d * math.sin(alpha) / math.sin(delta)\n\n y = a * math.cos(gamma)\n x = y * math.tan(gamma)\n\n return (\n bb_w - 2 * x,\n bb_h - 2 * y\n )", "def largest_rotated_rect(w, h, angle):\n\n quadrant = int(math.floor(angle / (math.pi / 2))) & 3\n sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\n alpha = (sign_alpha % math.pi + math.pi) % math.pi\n\n bb_w = w * math.cos(alpha) + h * math.sin(alpha)\n bb_h = w * math.sin(alpha) + h * math.cos(alpha)\n\n gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\n\n delta = math.pi - alpha - gamma\n\n length = h if (w < h) else w\n\n d = length * math.cos(alpha)\n a = d * math.sin(alpha) / math.sin(delta)\n\n y = a * math.cos(gamma)\n x = y * math.tan(gamma)\n\n return (\n bb_w - 2 * x,\n bb_h - 2 * y\n )", "def largest_rotated_rect(w, h, angle):\n\n quadrant = int(math.floor(angle / (math.pi / 2))) & 3\n sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\n alpha = (sign_alpha % math.pi + math.pi) % math.pi\n\n bb_w = w * math.cos(alpha) + h * math.sin(alpha)\n bb_h = w * math.sin(alpha) + h * math.cos(alpha)\n\n gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\n\n delta = math.pi - alpha - gamma\n\n length = h if (w < h) else w\n\n d = length * math.cos(alpha)\n a = d * math.sin(alpha) / math.sin(delta)\n\n y = a * math.cos(gamma)\n x = y * math.tan(gamma)\n\n return (\n bb_w - 2 * x,\n bb_h - 2 * y\n )", "def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n 
# o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], [ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r", "def detection2roi(detection, detection2roi_method='box'):\n if detection2roi_method == 'box':\n # compute box center and scale\n # use mediapipe/calculators/util/detections_to_rects_calculator.cc\n xc = (detection[:, 1] + detection[:, 3]) / 2\n yc = (detection[:, 0] + detection[:, 2]) / 2\n scale = (detection[:, 3] - detection[:, 1]) # assumes square boxes\n\n elif detection2roi_method == 'alignment':\n # compute box center and scale\n # use mediapipe/calculators/util/alignment_points_to_rects_calculator.cc\n xc = detection[:, 4+2*kp1]\n yc = detection[:, 4+2*kp1+1]\n x1 = detection[:, 4+2*kp2]\n y1 = detection[:, 4+2*kp2+1]\n scale = np.sqrt(((xc-x1)**2 + (yc-y1)**2)) * 2\n else:\n raise NotImplementedError(\n \"detection2roi_method [%s] not supported\" % detection2roi_method)\n\n yc += dy * scale\n scale *= dscale\n\n # compute box rotation\n x0 = detection[:, 4+2*kp1]\n y0 = detection[:, 4+2*kp1+1]\n x1 = detection[:, 4+2*kp2]\n y1 = detection[:, 4+2*kp2+1]\n theta = np.arctan2(y0-y1, x0-x1) - theta0\n return xc, yc, scale, theta", "def __repr__(self):\n return \"Box(mins={}, maxs={}, angles={})\".format(\n self.mins, self.maxs, self.angles\n )", "def normalize_rectangle(rect):\n assert len(rect) == 4, 'Rectangles must contain 4 coordinates'\n x0, y0, x1, y1 = rect\n assert x0 < x1, 'Invalid X coordinates'\n assert y0 < y1, 'Invalid Y coordinates'\n\n dx = x1 - x0\n dy = y1 - y0\n if dx > dy:\n scaled = float(dx) / dy\n upper_x, upper_y = 1.0, scaled\n else:\n scaled = float(dx) / dy\n upper_x, upper_y = scaled, 1.0\n\n assert 0 < upper_x <= 1.0, 'Calculated upper X coordinate invalid'\n assert 0 < upper_y <= 1.0, 'Calculated upper Y coordinate invalid'\n\n return (0, 0, upper_x, upper_y)", "def isRectangle( pathGroup):\n #print 'xxxxxxxx isRectangle',pathGroups\n if isinstance(pathGroup, Circle ): return None\n segmentList = [p for p in pathGroup.listOfPaths if p.isSegment() ]#or p.effectiveNPoints >0]\n if len(segmentList) != 4:\n debug( 'rectangle Failed at length ', len(segmentList))\n 
return None\n a,b,c,d = segmentList\n\n if length(a.point1, d.pointN)> 0.2*(a.length+d.length)*0.5:\n debug('rectangle test failed closing ', length(a.point1, d.pointN), a.length, d.length)\n return None\n \n Aac , Abd = closeAngleAbs(a.angle,c.angle), closeAngleAbs(b.angle , d.angle)\n if min(Aac,Abd) > 0.07 or max(Aac, Abd) >0.27 :\n debug( 'rectangle Failed at angles', Aac, Abd)\n return None\n notsimilarL = lambda d1,d2: abs(d1-d2)>0.20*min(d1,d2)\n\n pi , twopi = numpy.pi,2*numpy.pi\n angles = numpy.array( [p.angle for p in segmentList] )\n minAngleInd = numpy.argmin( numpy.minimum( abs(angles), abs( abs(angles)-pi), abs( abs(angles)-twopi) ) )\n rotAngle = angles[minAngleInd]\n width = (segmentList[minAngleInd].length + segmentList[(minAngleInd+2)%4].length)*0.5\n height = (segmentList[(minAngleInd+1)%4].length + segmentList[(minAngleInd+3)%4].length)*0.5\n # set rectangle center as the bbox center\n x,y,w,h = computeBox( numpy.concatenate( [ p.points for p in segmentList]) )\n r = Rectangle( numpy.array( [x+w/2, y+h/2]), (width, height), rotAngle, pathGroup.listOfPaths, pathGroup.refNode)\n \n debug( ' found a rectangle !! ', a.length, b.length, c.length, d.length )\n return r", "def optimise_bbox(self, box_width, box_height):\n target = box_width/box_height\n\n angles = []\n spatial_eff = [] # spatial efficiency\n for angle in np.arange(-90, 91, 1):\n r_rotated = self.rotate(angle)\n spatial_ratio = abs(r_rotated.width()/r_rotated.height())\n\n angles.append(angle)\n spatial_eff.append(abs(spatial_ratio - target))\n\n angles = np.array(angles)\n spatial_eff = np.array(spatial_eff)\n\n idx = spatial_eff.argmin()\n angle = angles[idx]\n\n return self.rotate(angle)", "def __rotate_single_bbox(bbox, image_height, image_width, degrees):\n image_height, image_width = (\n tf.cast(image_height, tf.float32), tf.cast(image_width, tf.float32))\n\n # Convert from degrees to radians.\n degrees_to_radians = math.pi / 180.0\n radians = degrees * degrees_to_radians\n\n # Translate the bbox to the center of the image and turn the normalized 0-1\n # coordinates to absolute pixel locations.\n # Y coordinates are made negative as the y axis of images goes down with\n # increasing pixel values, so we negate to make sure x axis and y axis points\n # are in the traditionally positive direction.\n min_y = -tf.cast(image_height * (bbox[0] - 0.5), tf.int32)\n min_x = tf.cast(image_width * (bbox[1] - 0.5), tf.int32)\n max_y = -tf.cast(image_height * (bbox[2] - 0.5), tf.int32)\n max_x = tf.cast(image_width * (bbox[3] - 0.5), tf.int32)\n coordinates = tf.stack(\n [[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])\n coordinates = tf.cast(coordinates, tf.float32)\n # Rotate the coordinates according to the rotation matrix clockwise if\n # radians is positive, else negative\n rotation_matrix = tf.stack(\n [[tf.cos(radians), tf.sin(radians)],\n [-tf.sin(radians), tf.cos(radians)]])\n new_coords = tf.cast(\n tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)\n # Find min/max values and convert them back to normalized 0-1 floats.\n min_y = -(tf.cast(tf.reduce_max(new_coords[0, :]), tf.float32) / image_height - 0.5)\n min_x = tf.cast(tf.reduce_min(new_coords[1, :]), tf.float32) / image_width + 0.5\n max_y = -(tf.cast(tf.reduce_min(new_coords[0, :]), tf.float32) / image_height - 0.5)\n max_x = tf.cast(tf.reduce_max(new_coords[1, :]), tf.float32) / image_width + 0.5\n\n # Clip the bboxes to be sure the fall between [0, 1].\n min_y, min_x, max_y, max_x = __clip_bbox(min_y, min_x, max_y, 
max_x)\n min_y, min_x, max_y, max_x = __check_bbox_area(min_y, min_x, max_y, max_x)\n\n return tf.stack([min_y, min_x, max_y, max_x])", "def cuboid_to_2d_frustum_bbox(corners: np.ndarray, planes: List[np.ndarray], K: np.ndarray) -> np.ndarray:\n\n def clip_line_segment(pt_a: np.ndarray, pt_b: np.ndarray, K: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Clip a line segment based on two points and the camera instrinc matrix.\n\n Args:\n pt_a: One 3D point vector constraining a line segment\n pt_b: One 3D point vector constraining a line segment\n K: A 3x3 array representing a camera intrinsic matrix\n\n Returns:\n a, b: A tuple of the clipped line segment 3D point vectors\n \"\"\"\n pt_a = K.dot(pt_a)\n pt_a /= pt_a[2]\n\n pt_b = K.dot(pt_b)\n pt_b /= pt_b[2]\n\n return np.round(pt_a).astype(np.int32), np.round(pt_b).astype(np.int32)\n\n def clip_rect(selected_corners: np.ndarray, clipped_uv_verts: np.ndarray) -> np.ndarray:\n \"\"\"Clip a rectangle based on the selected corners and clipped vertices coordinates.\n\n Args:\n selected_corners: A list of selected corners\n clipped_uv_verts: A list of clipped vertices\n\n Returns:\n A new list of clipped vertices based on the selected corners\n \"\"\"\n prev = selected_corners[-1]\n for corner in selected_corners:\n # interpolate line segments to the image border\n clip_prev, clip_corner = clip_segment_v3_plane_n(\n copy.deepcopy(prev), copy.deepcopy(corner), copy.deepcopy(planes)\n )\n prev = corner\n if clip_prev is None or clip_corner is None:\n continue\n a, b = clip_line_segment(clip_prev, clip_corner, K)\n clipped_uv_verts = np.vstack([clipped_uv_verts, a[:2].reshape(-1, 2)])\n clipped_uv_verts = np.vstack([clipped_uv_verts, b[:2].reshape(-1, 2)])\n\n return clipped_uv_verts\n\n clipped_uv_verts = np.zeros((0, 2))\n # Draw the sides\n for i in range(4):\n corner_f = corners[i] # front corner\n corner_b = corners[i + 4] # back corner\n\n clip_c_f, clip_c_b = clip_segment_v3_plane_n(corner_f, corner_b, planes)\n if clip_c_f is None or clip_c_b is None:\n continue\n a, b = clip_line_segment(clip_c_f, clip_c_b, K)\n\n clipped_uv_verts = np.vstack([clipped_uv_verts, a[:2].reshape(-1, 2)])\n clipped_uv_verts = np.vstack([clipped_uv_verts, b[:2].reshape(-1, 2)])\n\n # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)\n front_verts = clip_rect(corners[:4], clipped_uv_verts)\n back_verts = clip_rect(corners[4:], clipped_uv_verts)\n\n clipped_uv_verts = np.vstack([clipped_uv_verts, front_verts.reshape(-1, 2)])\n clipped_uv_verts = np.vstack([clipped_uv_verts, back_verts.reshape(-1, 2)])\n\n if clipped_uv_verts.shape[0] == 0:\n return None\n\n bbox_2d = compute_point_cloud_bbox(clipped_uv_verts)\n return bbox_2d", "def get_box(ra0, ra1, dec0, dec1):\n\n box = np.array([[dec0, ra1], [dec1, ra0]]) * np.pi / 180\n\n return box", "def test_2d_object(gridsize=50):\n\n obj = object25d() \n\n #you can load a 3D object, Z axis gets ignored\n obj.load('objects/sphere.obj')\n\n #obj.prim_square()\n #obj.prim_triangle()\n\n #obj.save('2d_square.obj')\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n\n #rotate the points by matrix multiplication \n obj.points = m22.batch_mult_pts( obj.points ) \n\n #saving a 2d object from 3D flattens it on Z axis. 
\n #utterly mangles the topology \n obj.save('2d_rotated.obj')\n\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=200, pfb=fb)\n bloody_simple_2drender('2d_rotation.png', obj=[obj], gridsize=200, pfb=fb)\n\n fb.save('2d_rotation.png')", "def get_rect(self):\n return self.pic.get_rect().move(self.pos)", "def draw_rectangle(t, w, h):\r\n for i in range(2):\r\n t.forward(w)\r\n t.left(90)\r\n t.forward(h)\r\n t.left(90)", "def p2(self):\n return tuple(self.rect[2:])", "def rect(r, theta):\n x = r * math.cos(theta)\n y = r * math.sin(theta)\n return x,y", "def bbox_shift_scale_rotate(bbox, angle, scale, dx, dy, rotate_method, rows, cols, **kwargs): # skipcq: PYL-W0613\n height, width = rows, cols\n center = (width / 2, height / 2)\n if rotate_method == \"ellipse\":\n x_min, y_min, x_max, y_max = bbox_rotate(bbox, angle, rotate_method, rows, cols)\n matrix = cv2.getRotationMatrix2D(center, 0, scale)\n else:\n x_min, y_min, x_max, y_max = bbox[:4]\n matrix = cv2.getRotationMatrix2D(center, angle, scale)\n matrix[0, 2] += dx * width\n matrix[1, 2] += dy * height\n x = np.array([x_min, x_max, x_max, x_min])\n y = np.array([y_min, y_min, y_max, y_max])\n ones = np.ones(shape=(len(x)))\n points_ones = np.vstack([x, y, ones]).transpose()\n points_ones[:, 0] *= width\n points_ones[:, 1] *= height\n tr_points = matrix.dot(points_ones.T).T\n tr_points[:, 0] /= width\n tr_points[:, 1] /= height\n\n x_min, x_max = min(tr_points[:, 0]), max(tr_points[:, 0])\n y_min, y_max = min(tr_points[:, 1]), max(tr_points[:, 1])\n\n return x_min, y_min, x_max, y_max", "def get_rect(self, i, j):\n x = self.x0 + j * self.dx\n y = self.y0 + i * self.dy\n return Rect(x, y, self.dx, self.dy)", "def psychopy_rectangle(\n window,\n x=0,\n y=0,\n size_width=1,\n size_height=1,\n rotate=0,\n color=\"black\",\n outline=0,\n outline_color=\"black\",\n alpha=1,\n adjust_width=False,\n adjust_height=False,\n **kwargs,\n):\n # Try loading psychopy\n try:\n from psychopy import visual\n except ImportError:\n raise ImportError(\n \"The 'psychopy' module is required for this function to run. 
\",\n \"Please install it first (`pip install PsychoPy`).\",\n )\n\n # Adjust size for screen ratio\n if adjust_width is True:\n size_width = size_width * (window.size[1] / window.size[0])\n if adjust_height is True:\n size_height = size_height * (window.size[0] / window.size[1])\n\n # Get coordinates\n x1, y1, x2, y2 = _coord_rectangle(image=window, x=x, y=y, size_width=size_width,\n size_height=size_height, method=\"psychopy\")\n\n # Rectangle parameters\n rect = visual.Rect(\n win=window,\n units='pix',\n width=x2-x1,\n height=y2-y1,\n fillColor=color,\n lineWidth=outline,\n **kwargs,\n )\n x = (x1 + x2)/2\n y = (y1 + y2)/2\n rect.pos = [x-window.size[0]/2, y-window.size[1]/2]\n rect.lineColor = outline_color\n\n # Alpha\n if alpha > 0:\n rect.opacity = alpha\n\n # Orientation\n if rotate != 0:\n rect.ori = rotate\n\n # Display\n rect.draw()", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')", "def rotate(block, direction):\n block.coords = [\n (\n int(block.center[0] + direction * (y - block.center[1])),\n int(block.center[1] + direction * (block.center[0] - x))\n )\n for (x, y) in block.coords]\n widthAndHeight(block)", "def oriented_bounding_box(\n self, x_axis: Vector3D, y_axis: Optional[Vector3D] = None, name: str = None) -> 'Box':\n x_axis = x_axis.copy()\n x_axis.normalize()\n if not y_axis:\n y_axis = _get_arbitrary_perpendicular_unit_vector(x_axis)\n else:\n y_axis = y_axis.copy()\n y_axis.normalize()\n\n self_body = _union_entities(self.bodies)\n bounding_box = app().measureManager.getOrientedBoundingBox(self_body, x_axis, y_axis)\n\n box = Box(\n bounding_box.length,\n bounding_box.width,\n bounding_box.height,\n name=name or \"OrientedBoundingBox\")\n\n z_axis = x_axis.crossProduct(y_axis)\n matrix = Matrix3D.create()\n matrix.setToAlignCoordinateSystems(\n Point3D.create(bounding_box.length / 2, bounding_box.width / 2, bounding_box.height/2),\n Vector3D.create(1, 0, 0),\n Vector3D.create(0, 1, 0),\n Vector3D.create(0, 0, 1),\n bounding_box.centerPoint,\n x_axis,\n y_axis,\n z_axis)\n\n box.transform(matrix)\n return box", "def drawRectangle(width, height, tilt, penColor, fillColor):\n Lucia.color(penColor,fillColor)\n Lucia.seth(tilt)\n Lucia.begin_fill()\n for i in range(2):\n Lucia.forward(width)\n Lucia.left(90)\n Lucia.forward(height)\n Lucia.left(90)\n Lucia.end_fill()", "def get_boxcorners(places, rotates, size):\n corners = []\n for place, rotate, sz in zip(places, rotates, size):\n x, y, z = place\n h, w, l = sz\n if l > 10:\n continue\n\n corner = np.array([\n [x - l / 2., y - w / 2., z],\n [x + l / 2., y - w / 2., z],\n [x - l / 2., y + w / 2., z],\n [x - l / 2., y - w / 2., z + h],\n [x - l / 2., y + w / 2., z + h],\n [x + l / 2., y + w / 2., z],\n [x + l / 2., y - w / 2., z + 
h],\n [x + l / 2., y + w / 2., z + h],\n ])\n # corner = np.array([\n # [x - l / 2., y - w / 2., z], #bottom surface bottom right (0)\n # # [x + l / 2., y - w / 2., z], #bottom surface top right (1)\n # [x - l / 2., y + w / 2., z], #bottom surface bottom left (2)\n # [x - l / 2., y - w / 2., z + h],#bottom right (3)\n # [x - l / 2., y + w / 2., z + h],#bottom left (4)\n # # [x + l / 2., y + w / 2., z],#bottom surface top left (5)\n # # [x + l / 2., y - w / 2., z + h],# top right (6)\n # # [x + l / 2., y + w / 2., z + h],# top left (7)\n # ])\n\n corner -= np.array([x, y, z])\n\n rotate_matrix = np.array([\n [np.cos(rotate), -np.sin(rotate), 0],\n [np.sin(rotate), np.cos(rotate), 0],\n [0, 0, 1]\n ])\n\n a = np.dot(corner, rotate_matrix.transpose())\n a += np.array([x, y, z])\n corners.append(a)\n return np.array(corners).astype(np.float32)\n\n\n\n def get_boxcorners_front(places, rotates, size):\n \"\"\"Create 8 corners of bounding box from bottom center.\"\"\"\n corners = []\n for place, rotate, sz in zip(places, rotates, size):\n x, y, z = place\n h, w, l = sz\n if l > 10:\n continue\n\n # corner = np.array([\n # [x - l / 2., y - w / 2., z],\n # [x + l / 2., y - w / 2., z],\n # [x - l / 2., y + w / 2., z],\n # [x - l / 2., y - w / 2., z + h],\n # [x - l / 2., y + w / 2., z + h],\n # [x + l / 2., y + w / 2., z],\n # [x + l / 2., y - w / 2., z + h],\n # [x + l / 2., y + w / 2., z + h],\n # ])\n corner = np.array([\n [x - l / 2., y - w / 2., z], #bottom surface top right (0)\n # [x + l / 2., y - w / 2., z], #bottom surface bottom right (1)\n [x - l / 2., y + w / 2., z], #bottom surface top left (2)\n [x - l / 2., y - w / 2., z + h],#top surface top right (3)\n [x - l / 2., y + w / 2., z + h],#top surface top left (4)\n # [x + l / 2., y + w / 2., z],#bottom surface bottom left (5)\n # [x + l / 2., y - w / 2., z + h],# top surface bottom right (6)\n # [x + l / 2., y + w / 2., z + h],# top surface bottom left (7)\n ])\n\n # corner = np.array([\n # [x, y, z], #bottom surface bottom left (0) \n # [x, yy, z], #bottom surface right (1)\n # [xx, y, z], #bottom surface top left (2)(2)\n # [xx, yy, z],#bottom surface top right (3)(0)\n # [x, y, zz], #top surface left (4)\n # [x, yy, zz], #top surface right (5)\n # [xx, y, zz], #top surface top left (6)(4)\n # [xx, yy, zz],#top surface top right (7)(3)\n # ])\n\n corner -= np.array([x, y, z])\n\n rotate_matrix = np.array([\n [np.cos(rotate), -np.sin(rotate), 0],\n [np.sin(rotate), np.cos(rotate), 0],\n [0, 0, 1]\n ])\n\n a = np.dot(corner, rotate_matrix.transpose())\n a += np.array([x, y, z])\n corners.append(a)\n return np.array(corners).astype(np.float32)", "def rotate_struct(struct, residue, axis, normal) :\n normv = np.zeros(3)\n normv[[\"x\",\"y\",\"z\"].index(normal)] = 1.0\n\n # Move structure to center of coordinates\n res_xyz = residue.collect(\"xyz\")\n center = res_xyz.mean(axis=0)\n res_xyz -= center\n xyz = struct.xyz - center\n xyz = xyz - center\n\n # Find the rotation matrix and rotate the full structure\n rotvec = geo.rotaxis(axis,normv)\n alpha = geo.angle(axis,normv)\n rotmat = geo.rotation_matrix(alpha,rotvec)\n xyz = fitting.rotate(xyz,rotmat)+center\n struct.update_xyz(xyz)", "def create_box(self, a, b, c):\n proj_to_xy = lambda x: x[:2]\n get_angle = lambda x,y: (x @ y) / (np.linalg.norm(x) * np.linalg.norm(y))\n\n ab = proj_to_xy(b) - proj_to_xy(a)\n ac = proj_to_xy(c) - proj_to_xy(a)\n bc = proj_to_xy(c) - proj_to_xy(b)\n\n ab_ac = np.abs(get_angle(ab, ac))\n ab_bc = np.abs(get_angle(ab, bc))\n\n x1, y1, z1 = a\n x2, y2, z2 = 
b\n x3, y3, z3 = c\n\n z = (z1 + z2)/2\n\n down = np.array([0., 0., z - z3])\n\n if ab_ac < ab_bc: # 3. point is bottom-left\n back = np.array([ac[0], ac[1], 0])\n else: # 3. point is bottom-right\n back = np.array([bc[0], bc[1], 0])\n\n tfl = np.array([x1, y1, z])\n tfr = np.array([x2, y2, z])\n\n tbl = tfl + back\n tbr = tfr + back\n\n bfl = tfl - down\n bfr = tfr - down\n\n bbl = bfl + back\n bbr = bfr + back\n\n return np.array([\n tfl, tfr,\n tbl, tbr,\n bfl, bfr,\n bbl, bbr\n ])", "def rectangle_graph():\n scaled = scale((200, 200, 200), 2)\n print scaled\n pylon_graph = graph.graph()\n base = rectangle(ORIGIN, WIDTH, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n pylon_graph.connect_neighbours(base_ids, WIDTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n pylon_graph.connect_neighbours(all_ids, WIDTH)\n return pylon_graph", "def drawRectangle_2():\n\n # Calculate the coordinates for the four corners of the rectangle\n x1 = Lucia.xcor()\n y1 = Lucia.ycor()\n x2 = x1 + 50\n y2 = y1\n x3 = x2\n y3 = y2 + 100\n x4 = x1\n y4 = y1 + 100\n Lucia.color(\"green\",\"yellow\") # set the pen and fill colors\n Lucia.begin_fill()\n # Command the turtle to visit the four corners in order\n Lucia.goto(x2, y2)\n Lucia.goto(x3, y3)\n Lucia.goto(x4, y4)\n Lucia.goto(x1, y1)\n\n Lucia.end_fill()", "def move_rectangle(old_rec,dx,dy):\n new_rec = Rectangle()\n new_rec.height = old_rec.height\n new_rec.width = old_rec.width\n new_rec.corner = Point()\n new_rec.corner.x = old_rec.corner.x + dx\n new_rec.corner.y = old_rec.corner.y + dy\n return new_rec", "def getRect(self):\n return self.rect()", "def polar_from_rectangular(width_pol, height_pol, width_reg, height_reg):\n xcenter = (width_reg - 1.0) * 0.5\n ycenter = (height_reg - 1.0) * 0.5\n r_max = np.floor(max(xcenter, ycenter))\n xlist = (np.flipud(np.arange(width_reg)) - xcenter) * width_pol / r_max\n ylist = (np.flipud(np.arange(height_reg)) - ycenter) * width_pol / r_max\n x_mat, y_mat = np.meshgrid(xlist, ylist)\n r_mat = np.float32(\n np.clip(np.sqrt(x_mat ** 2 + y_mat ** 2), 0, width_pol - 1))\n theta_mat = np.float32(np.clip(\n (np.pi + np.arctan2(y_mat, x_mat)) * (height_pol - 1) / (2 * np.pi), 0,\n height_pol - 1))\n return r_mat, theta_mat", "def project_ref_to_rect(self, pts_3d_ref):\n ''' Input and Output are nx3 points '''\n return np.transpose(np.dot(self.R0, np.transpose(pts_3d_ref)))", "def project_ref_to_rect(self, pts_3d_ref):\n ''' Input and Output are nx3 points '''\n return np.transpose(np.dot(self.R0, np.transpose(pts_3d_ref)))", "def rotated(self, angle):\n vx, vy = self\n ca, sa = cos_sin_deg(angle)\n return tuple.__new__(Vec2, (vx * ca - vy * sa, vx * sa + vy * ca))", "def ClippedArea(rectangle):\n _, x0, y0, x1, y1 = rectangle\n clipped_width = max(0, min(width, x1) - max(0, x0))\n clipped_height = max(0, min(height, y1) - max(0, y0))\n return clipped_width * clipped_height", "def topo2rect(p, b, N=64):\n az, el = map(radians, p)\n \n r = cos(el)\n x = (r * sin(az) - b.o.x) * b.M\n y = (r * cos(az) - b.o.y) * b.M\n \n return rect(int(N * (x * b.i.x + y * b.i.y)),\n int(N * (x * b.j.x + y * b.j.y)))", "def geo2rect(self, geo):\n\n s = sin(geo.lat);\n RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));\n\n return CRECT((\n (RN + geo.h) * cos(geo.lat) * cos(geo.lon),\n (RN + geo.h) * 
cos(geo.lat) * sin(geo.lon),\n ((RN * (1.0 - self.earth_e2)) + geo.h) * sin(geo.lat)\n ))", "def _boundRect(self):\n addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in self.footprints])\n self.upperleft = list(map(min, zip(*addresstamp)))\n self.bottomright = list(map(max, zip(*addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def _get_render_area(self):\n # take off our page margins\n render_area = Box2d(\n self._margin,\n self._margin,\n self._pagesize[0] -\n self._margin,\n self._pagesize[1] -\n self._margin)\n\n # then if user specified a box to render get intersection with that\n if self._box:\n return render_area.intersect(self._box)\n\n return render_area", "def normalizeBox(box):\n x, y, w, h = box\n if w < 0:\n x += (w+1)\n w *= -1\n if h < 0:\n y += (h+1)\n h *= -1\n return (x, y, w, h)", "def boundingBox(self):\n minx, miny, maxx, maxy = self.substrates.bounds\n return pcbnew.BOX2I(\n pcbnew.VECTOR2I(int(minx), int(miny)),\n pcbnew.VECTOR2I(int(maxx - minx), int(maxy - miny)))", "def rectangular_from_polar(width_reg, height_reg, width_pol, height_pol):\n xcenter = (width_reg - 1.0) * 0.5\n ycenter = (height_reg - 1.0) * 0.5\n r_max = np.floor(max(xcenter, ycenter))\n r_list = np.linspace(0.0, r_max, width_pol)\n theta_list = np.arange(0.0, height_pol, 1.0) * 2 * np.pi / (height_pol - 1)\n r_mat, theta_mat = np.meshgrid(r_list, theta_list)\n x_mat = np.float32(\n np.clip(xcenter + r_mat * np.cos(theta_mat), 0, width_reg - 1))\n y_mat = np.float32(\n np.clip(ycenter + r_mat * np.sin(theta_mat), 0, height_reg - 1))\n return x_mat, y_mat", "def revert(self, image, augmented_image, boundingBoxes):\n\n cols, rows = augmented_image.size\n ori_cols, ori_rows = image.size\n\n cx = cols//2.0\n cy = rows//2.0\n\n # calculate the new bounds after the revert\n nH, nW = self.compute_bound(augmented_image, -self._angle)\n\n # calculate the coordinates change because the rotations\n delta_width = (nW - ori_cols)//2\n delta_height = (nH - ori_rows)//2\n\n new_boxes = []\n for bb in boundingBoxes:\n\n # get a bounding box\n new_bb = [(bb[0], bb[1]), (bb[2], bb[1]), (bb[0], bb[3]), (bb[2], bb[3])]\n\n # revert the rotation of the BB\n new_bb = self.rotate_box(new_bb, cx, cy, rows, cols)\n\n # revert the offset of the BB\n new_bb = [(p[0] - delta_width, p[1] - delta_height) for p in new_bb]\n\n # take the BB of the BB\n new_bb = [max(0, min([x[0] for x in new_bb])),\n max(0, min([x[1] for x in new_bb])),\n min(image.size[0], max([x[0] for x in new_bb])),\n min(image.size[1], max([x[1] for x in new_bb])), bb[4], bb[5]]\n\n new_boxes.append(new_bb)\n\n return np.array(new_boxes)", "def getRect(self):\n return self.rect", "def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if (self.rotation is None and self.magnification is None and\n self.x_reflection is None):\n key = self\n else:\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection)\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref 
in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))", "def bbox_to_rect(bbox, color):\n return plt.Rectangle(\n xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],\n fill=False, edgecolor=color, linewidth=2\n )", "def bbox_iou(box1, box2, x1y1x2y2=True):\n if not x1y1x2y2:\n # Transform from center and width to exact coordinates\n b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2\n b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2\n b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2\n b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2\n else:\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]\n\n # get the corrdinates of the intersection rectangle\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\n # Intersection area\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(\n inter_rect_y2 - inter_rect_y1 + 1, min=0\n )\n # Union Area\n b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)\n b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)\n\n iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)\n\n return iou", "def get_roi_rect(self):\n return self.rect_list", "def perimRect(length, width):\n return 2 * (length + width)", "def rotate((x,y)):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n w,h = image_size()\n if orientation == 0: return (x,y)\n if orientation == -90: return (h-y,x)\n if orientation == 90: return (y,w-x)\n if orientation == 180: return (w-x,h-y)\n return (x,y)" ]
[ "0.7022365", "0.667482", "0.64325494", "0.641342", "0.6358206", "0.6105708", "0.60524374", "0.604659", "0.604289", "0.602959", "0.60229516", "0.6003215", "0.5952361", "0.58678347", "0.58623916", "0.5842129", "0.5837796", "0.58315367", "0.5814212", "0.5811554", "0.58072996", "0.5791181", "0.57812804", "0.57095814", "0.56917185", "0.5689231", "0.5688774", "0.5684974", "0.56424433", "0.5631062", "0.56200945", "0.5617089", "0.56162316", "0.5615514", "0.5608711", "0.5601977", "0.55980307", "0.5584683", "0.5583414", "0.5569573", "0.55638975", "0.5556174", "0.5553265", "0.5548066", "0.5536268", "0.5511543", "0.5508237", "0.5487237", "0.5476396", "0.5476396", "0.5476396", "0.5455496", "0.54548895", "0.54541093", "0.5451568", "0.54464775", "0.54357594", "0.54197097", "0.54044574", "0.5398938", "0.53917414", "0.53886074", "0.53885573", "0.5388099", "0.5387939", "0.53863853", "0.5381026", "0.5380191", "0.53576154", "0.5356426", "0.53560036", "0.535586", "0.5348633", "0.5346909", "0.5342707", "0.5342497", "0.5339802", "0.5337849", "0.53355175", "0.53341013", "0.53332996", "0.53330976", "0.53330976", "0.5331158", "0.5330841", "0.5330519", "0.53262365", "0.5325529", "0.5307059", "0.53013074", "0.53012484", "0.52992356", "0.5293107", "0.5290596", "0.52872956", "0.52865034", "0.52857846", "0.5279989", "0.52778816", "0.52688336" ]
0.6074911
6
Converts UTM coordinates into latitude/longitude. Assumes rows are easting, northing, zone number, and either 'N' for the northern hemisphere or 'S' for the southern hemisphere.
def utm_to_latlong(input_data_file=None, output_data_file=None, log_file=None, log_level=DEFAULT_LOG_LEVEL): logger = logger_message(__name__, log_file, log_level) # Check required input and output data file names were given. assert input_data_file is not None, 'An input CSV file with columns of values is required.' assert output_data_file is not None, 'An output CSV file to write new values is required.' _in = open(input_data_file, 'r') try: _out = open(output_data_file, 'w') try: data = csv.reader(_in) output = csv.writer(_out) for row_ind, row in enumerate(data): east = float(row[0]) north = float(row[1]) zone = int(row[2]) latlong = utm.to_latlon(east, north, zone, northern=('N' == row[3])) logger.info('Changed row {} from: {} to: {}'.format(row_ind, (row[0], row[1]), latlong)) output.writerow(latlong) finally: _out.close() finally: _in.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_pseudo_epsg4326_coordinates(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['lat'] = 180*(pi/2 - np.arccos(self.df_attributes['coord_z']))/pi\n self.df_attributes['lon'] = 180*np.arctan2(self.df_attributes['coord_y'], self.df_attributes['coord_x'])/pi", "def lonlat_from_utm(eastings, northings, zone, ellipsoid='WGS84', datum='WGS84', parallel=False):\n\n eastings = to_list(eastings)\n northings = to_list(northings)\n zone = to_list(zone)\n\n inverse = True\n\n npos = len(eastings)\n if npos != len(northings):\n raise ValueError('Supplied eastings and northings are not the same size.')\n\n # If we've been given a single zone and multiple coordinates, make a\n # list of zones so we can do things easily in parallel.\n if len(zone) != npos:\n zone = zone * npos\n\n # Do this in parallel unless we only have a small number of positions or\n # we've been told not to.\n if npos > 1 and parallel:\n pool = multiprocessing.Pool()\n arguments = zip(eastings, northings, zone,\n [ellipsoid] * npos,\n [datum] * npos,\n [inverse] * npos)\n results = pool.map(__convert, arguments)\n results = np.asarray(results)\n lon, lat = results[:, 0], results[:, 1]\n pool.close()\n elif npos > 1 and not parallel:\n lon, lat = [], []\n for pos in zip(eastings, northings, zone, [ellipsoid] * npos, [datum] * npos, [inverse] * npos):\n result = __convert(pos)\n lon.append(result[0])\n lat.append(result[1])\n else:\n # The eastings, northings and zone will all be lists here, For\n # cross-python2/python3 support, we can't just * them, so assume\n # the first value in the list is what we want.\n lon, lat = __convert((eastings[0], northings[0], zone[0], ellipsoid, datum, inverse))\n\n return np.asarray(lon), np.asarray(lat)", "def convert_from_latlon_to_utm(points=None,\r\n latitudes=None,\r\n longitudes=None,\r\n false_easting=None,\r\n false_northing=None):\r\n\r\n old_geo = Geo_reference() \r\n utm_points = []\r\n if points == None:\r\n assert len(latitudes) == len(longitudes)\r\n points = map(None, latitudes, longitudes)\r\n \r\n for point in points:\r\n \r\n zone, easting, northing = redfearn(float(point[0]),\r\n float(point[1]),\r\n false_easting=false_easting,\r\n false_northing=false_northing)\r\n new_geo = Geo_reference(zone)\r\n old_geo.reconcile_zones(new_geo) \r\n utm_points.append([easting, northing])\r\n\r\n return utm_points, old_geo.get_zone()", "def TransformUTMToDD(self, coordinates):\n conversion_status = True\n decimal_degrees_coordinates = []\n\n zone_number = 0\n zone_letter = \"\"\n easting = 0.0\n northing = 0.0\n\n self._logger.debug(\n \"Performing UTM to DD transformation on coordinates %s.\", coordinates)\n\n matched = re.match(self._utm_pattern, coordinates[0])\n if matched:\n try:\n (zone_number, zone_letter, easting, northing) = matched.groups()\n except Exception, e:\n conversion_status = False\n self._logger.error(\n \"UTM coordinates %s not in proper format, %s.\", coordinates, e)\n\n x = float(easting) - CoordinateTransform.EASTING_ADJ_FACTOR\n y = float(northing)\n\n zone_number = int(zone_number)\n longitude_origin_adjustment = ((zone_number * 6) - 183)\n\n if (\n (zone_letter in [\"A\", \"B\", \"I\", \"O\", \"Y\", \"Z\"]) or\n (zone_number not in xrange(1, 61)) or\n ((zone_letter in [\"X\"]) and (zone_number in [32, 34, 36]))\n ):\n conversion_status = False\n self._logger.error(\"Invalid grid Zone information entered %s.\",\n coordinates)\n\n if zone_letter < \"N\":\n y -= CoordinateTransform.NORTHING_ADJ_FACTOR\n\n # meridional_arc and mu are 
used in calculating n1, t1, c1, r1 and d1 values\n meridional_arc = float(y/CoordinateTransform.K0)\n mu = (meridional_arc /\n (CoordinateTransform.EARTH_RADIUS *\n (1 - (CoordinateTransform.ECC_SQUARED/4) -\n (3 * math.pow(CoordinateTransform.ECC_SQUARED, 2)/64) -\n (5 * math.pow(CoordinateTransform.ECC_SQUARED, 3)/256))))\n\n # fp is footprint_latitude\n h1 = 3 * self._e1/2\n h2 = 27 * math.pow(self._e1, 3)/32\n h3 = 21 * math.pow(self._e1, 2)/16\n h4 = 55 * math.pow(self._e1, 4)/32\n h5 = 151 * math.pow(self._e1, 3)/96\n h6 = 1097 * math.pow(self._e1, 4)/512\n\n x1 = ((h1 - h2) * math.sin(2 * mu))\n x2 = ((h3 - h4) * math.sin(4 * mu))\n x3 = (h5 * math.sin(6 * mu))\n x4 = (h6 * math.sin(8 * mu))\n fp = mu + x1 + x2 + x3 + x4\n\n n1 = (CoordinateTransform.EARTH_RADIUS/\n math.sqrt(1- (CoordinateTransform.ECC_SQUARED *\n math.pow(math.sin(fp), 2))))\n t1 = math.pow(math.tan(fp), 2)\n c1 = self._e1_prime * math.pow(math.cos(fp), 2)\n r1 = (CoordinateTransform.EARTH_RADIUS * (\n (1 - CoordinateTransform.ECC_SQUARED)/math.pow(\n 1- (CoordinateTransform.ECC_SQUARED *\n math.pow(math.sin(fp), 2)), 1.5)))\n d1 = x/(n1 * CoordinateTransform.K0)\n\n # q1, q2, q3 and q4 are used in calculating the latitude\n\n q1 = n1 * math.tan(fp)/r1\n q2 = (math.pow(d1, 2)/2)\n q3 = (5 + (3 * t1) + (10 * c1) - (4 * math.pow(c1, 2)) -\n (9 * self._e1_prime)) * math.pow(d1, 4)/24\n q4 = (61 + (90 * t1) + (298 * c1) + (45 * math.pow(t1, 2)) -\n (3 * math.pow(c1, 2)) - (252 * self._e1_prime)) * math.pow(d1, 6)/720\n\n # q5, q6 and q7 are used in calculating the longitude\n\n q5 = d1\n q6 = (1 + (2 * t1) + c1) * math.pow(d1, 3)/6\n q7 = (5 -(2 * c1) + (28 * t1)-(3 * math.pow(c1, 2)) + (8 * self._e1_prime) +\n (24 * math.pow(t1, 2))) * math.pow(d1, 5)/120\n\n latitude = ((fp - (q1 * (q2 - q3 + q4))) * 180.0/math.pi)\n longitude = longitude_origin_adjustment + (\n (q5 - q6 + q7)/math.cos(fp)) * 180.0/math.pi\n\n if conversion_status:\n decimal_degrees_coordinates.append(latitude)\n decimal_degrees_coordinates.append(longitude)\n\n return (conversion_status, decimal_degrees_coordinates)", "def LLtoUTM( Lat, Long, ReferenceEllipsoid=23):\r\n a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]\r\n eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]\r\n k0 = 0.9996\r\n\r\n#Make sure the longitude is between -180.00 .. 179.9\r\n LongTemp = (Long+180)-int((Long+180)/360)*360-180 # -180.00 .. 
179.9\r\n\r\n LatRad = Lat*_deg2rad\r\n LongRad = LongTemp*_deg2rad\r\n\r\n ZoneNumber = int((LongTemp + 180)/6) + 1\r\n\r\n if Lat >= 56.0 and Lat < 64.0 and LongTemp >= 3.0 and LongTemp < 12.0:\r\n ZoneNumber = 32\r\n\r\n # Special zones for Svalbard\r\n if Lat >= 72.0 and Lat < 84.0:\r\n if LongTemp >= 0.0 and LongTemp < 9.0:ZoneNumber = 31\r\n elif LongTemp >= 9.0 and LongTemp < 21.0: ZoneNumber = 33\r\n elif LongTemp >= 21.0 and LongTemp < 33.0: ZoneNumber = 35\r\n elif LongTemp >= 33.0 and LongTemp < 42.0: ZoneNumber = 37\r\n\r\n LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 #+3 puts origin in middle of zone\r\n LongOriginRad = LongOrigin * _deg2rad\r\n\r\n #compute the UTM Zone from the latitude and longitude\r\n UTMZone = \"%d%c\" % (ZoneNumber, _UTMLetterDesignator(Lat))\r\n\r\n eccPrimeSquared = (eccSquared)/(1-eccSquared)\r\n N = a/sqrt(1-eccSquared*sin(LatRad)*sin(LatRad))\r\n T = tan(LatRad)*tan(LatRad)\r\n C = eccPrimeSquared*cos(LatRad)*cos(LatRad)\r\n A = cos(LatRad)*(LongRad-LongOriginRad)\r\n\r\n M = a*((1\r\n - eccSquared/4\r\n - 3*eccSquared*eccSquared/64\r\n - 5*eccSquared*eccSquared*eccSquared/256)*LatRad\r\n - (3*eccSquared/8\r\n + 3*eccSquared*eccSquared/32\r\n + 45*eccSquared*eccSquared*eccSquared/1024)*sin(2*LatRad)\r\n + (15*eccSquared*eccSquared/256 + 45*eccSquared*eccSquared*eccSquared/1024)*sin(4*LatRad)\r\n - (35*eccSquared*eccSquared*eccSquared/3072)*sin(6*LatRad))\r\n\r\n UTMEasting = (k0*N*(A+(1-T+C)*A*A*A/6\r\n + (5-18*T+T*T+72*C-58*eccPrimeSquared)*A*A*A*A*A/120)\r\n + 500000.0)\r\n\r\n UTMNorthing = (k0*(M+N*tan(LatRad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24\r\n + (61\r\n -58*T\r\n +T*T\r\n +600*C\r\n -330*eccPrimeSquared)*A*A*A*A*A*A/720)))\r\n\r\n if Lat < 0:\r\n UTMNorthing = UTMNorthing + 10000000.0; #10000000 meter offset for southern hemisphere\r\n #UTMZone was originally returned here. 
I don't know what the\r\n #letter at the end was for.\r\n #print \"UTMZone\", UTMZone \r\n return (ZoneNumber, UTMEasting, UTMNorthing)", "def UTMtoLL(northing, easting, zone, isSouthernHemisphere=True,\r\n ReferenceEllipsoid=23):\r\n k0 = 0.9996\r\n a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]\r\n eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]\r\n e1 = (1-sqrt(1-eccSquared))/(1+sqrt(1-eccSquared))\r\n\r\n x = easting - 500000.0 #remove 500,000 meter offset for longitude\r\n y = northing\r\n\r\n ZoneNumber = int(zone)\r\n if isSouthernHemisphere:\r\n y -= 10000000.0 # remove 10,000,000 meter offset used\r\n # for southern hemisphere\r\n\r\n LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 # +3 puts origin in middle of zone\r\n\r\n eccPrimeSquared = (eccSquared)/(1-eccSquared)\r\n\r\n M = y / k0\r\n mu = M/(a*(1-eccSquared/4-3*eccSquared*eccSquared/64-5*eccSquared*eccSquared*eccSquared/256))\r\n\r\n phi1Rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu)\r\n + (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu)\r\n +(151*e1*e1*e1/96)*sin(6*mu))\r\n phi1 = phi1Rad*_rad2deg;\r\n\r\n N1 = a/sqrt(1-eccSquared*sin(phi1Rad)*sin(phi1Rad))\r\n T1 = tan(phi1Rad)*tan(phi1Rad)\r\n C1 = eccPrimeSquared*cos(phi1Rad)*cos(phi1Rad)\r\n R1 = a*(1-eccSquared)/pow(1-eccSquared*sin(phi1Rad)*sin(phi1Rad), 1.5)\r\n D = x/(N1*k0)\r\n\r\n Lat = phi1Rad - (N1*tan(phi1Rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*eccPrimeSquared)*D*D*D*D/24\r\n +(61+90*T1+298*C1+45*T1*T1-252*eccPrimeSquared-3*C1*C1)*D*D*D*D*D*D/720)\r\n Lat = Lat * _rad2deg\r\n\r\n Long = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*eccPrimeSquared+24*T1*T1)\r\n *D*D*D*D*D/120)/cos(phi1Rad)\r\n Long = LongOrigin + Long * _rad2deg\r\n return (Lat, Long)", "def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None):\n\n a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]\n eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]\n k0 = 0.9996\n\n #Make sure the longitude is between -180.00 .. 179.9\n LongTemp = (Long+180)-int((Long+180)/360)*360-180 # -180.00 .. 
179.9\n\n LatRad = Lat*_deg2rad\n LongRad = LongTemp*_deg2rad\n\n if zone is None:\n ZoneNumber = int((LongTemp + 180)/6) + 1\n else:\n ZoneNumber = zone\n\n if Lat >= 56.0 and Lat < 64.0 and LongTemp >= 3.0 and LongTemp < 12.0:\n ZoneNumber = 32\n\n # Special zones for Svalbard\n if Lat >= 72.0 and Lat < 84.0:\n if LongTemp >= 0.0 and LongTemp < 9.0:ZoneNumber = 31\n elif LongTemp >= 9.0 and LongTemp < 21.0: ZoneNumber = 33\n elif LongTemp >= 21.0 and LongTemp < 33.0: ZoneNumber = 35\n elif LongTemp >= 33.0 and LongTemp < 42.0: ZoneNumber = 37\n\n LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 #+3 puts origin in middle of zone\n LongOriginRad = LongOrigin * _deg2rad\n\n #compute the UTM Zone from the latitude and longitude\n UTMZone = \"%d%c\" % (ZoneNumber, _UTMLetterDesignator(Lat))\n\n eccPrimeSquared = (eccSquared)/(1-eccSquared)\n N = a/sqrt(1-eccSquared*sin(LatRad)*sin(LatRad))\n T = tan(LatRad)*tan(LatRad)\n C = eccPrimeSquared*cos(LatRad)*cos(LatRad)\n A = cos(LatRad)*(LongRad-LongOriginRad)\n\n M = a*((1\n - eccSquared/4\n - 3*eccSquared*eccSquared/64\n - 5*eccSquared*eccSquared*eccSquared/256)*LatRad\n - (3*eccSquared/8\n + 3*eccSquared*eccSquared/32\n + 45*eccSquared*eccSquared*eccSquared/1024)*sin(2*LatRad)\n + (15*eccSquared*eccSquared/256 + 45*eccSquared*eccSquared*eccSquared/1024)*sin(4*LatRad)\n - (35*eccSquared*eccSquared*eccSquared/3072)*sin(6*LatRad))\n\n UTMEasting = (k0*N*(A+(1-T+C)*A*A*A/6\n + (5-18*T+T*T+72*C-58*eccPrimeSquared)*A*A*A*A*A/120)\n + 500000.0)\n\n UTMNorthing = (k0*(M+N*tan(LatRad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24\n + (61\n -58*T\n +T*T\n +600*C\n -330*eccPrimeSquared)*A*A*A*A*A*A/720)))\n if Lat < 0:\n UTMNorthing = UTMNorthing + 10000000.0; #10000000 meter offset for southern hemisphere\n return (UTMZone, UTMEasting, UTMNorthing)", "def rowcol_to_latlon(rowcol_str):\n\n # Vectors for code in file name\n latspace = np.linspace(-90,90,46)\n lonspace = np.linspace(-180,180,73)\n lat = latspace[int(rowcol_str[0:2])-1]\n lon = lonspace[int(rowcol_str[2:])-1]\n return (lat,lon)", "def lonlat2xy(s_lon, s_lat): # x: easting, y: northing\r\n # convert decimals to seconds...\r\n s_lon = dec2sec(s_lon)\r\n s_lat = dec2sec(s_lat)\r\n\r\n ## Auxiliary values \r\n # i.e. 
differences of latitude and longitude relative to Bern in the unit [10000'']\r\n s_lng_aux = (s_lon - 26782.5)/10000.\r\n s_lat_aux = (s_lat - 169028.66)/10000.\r\n \r\n # easting\r\n s_x = (600072.37 \r\n + 211455.93*s_lng_aux \r\n - 10938.51*s_lng_aux*s_lat_aux \r\n - 0.36*s_lng_aux*(s_lat_aux**2) \r\n - 44.54*(s_lng_aux**3))\r\n \r\n # northing\r\n s_y = (200147.07 \r\n + 308807.95*s_lat_aux \r\n + 3745.25*(s_lng_aux**2) \r\n + 76.63*(s_lat_aux**2) \r\n - 194.56*(s_lng_aux**2)*s_lat_aux \r\n + 119.79*(s_lat_aux**3))\r\n\r\n return s_x, s_y", "def utm_from_lonlat(lon, lat, zone=None, ellipsoid='WGS84', datum='WGS84', parallel=False):\n lon = to_list(lon)\n lat = to_list(lat)\n\n # Fix zone arrays/lists/tuple/strings.\n if isinstance(zone, str):\n zone = to_list(zone)\n elif isinstance(zone, np.ndarray):\n zone = zone.tolist()\n\n # For the spherical case, if we haven't been given zones, find them.\n if not zone:\n zone = []\n try:\n for lonlat in zip(lon, lat):\n zone_number = _get_zone_number(*lonlat)\n zone_letter = _get_zone_letter(lonlat[-1])\n if zone_number and zone_letter:\n zone.append('{:d}{}'.format(zone_number, zone_letter))\n else:\n raise ValueError('Invalid zone letter: are your latitudes north or south of 84 and -80 respectively?')\n except TypeError:\n zone_number = _get_zone_number(lon, lat)\n zone_letter = _get_zone_letter(lat)\n if zone_number and zone_letter:\n zone.append('{:d}{}'.format(zone_number, zone_letter))\n else:\n raise ValueError('Invalid zone letter: are your latitudes north or south of 84 and -80 respectively?')\n\n zone = to_list(zone)\n inverse = False\n\n try:\n npos = len(lon)\n if npos != len(lon):\n raise ValueError('Supplied latitudes and longitudes are not the same size.')\n except TypeError:\n npos = 1\n\n # If we've been given a single zone and multiple coordinates, make a\n # list of zones so we can do things easily in parallel.\n try:\n if len(zone) != npos:\n zone = zone * npos\n except TypeError:\n # Leave zone as is.\n pass\n\n # Do this in parallel unless we only have a single position or we've been\n # told not to.\n if npos > 1 and parallel:\n pool = multiprocessing.Pool()\n arguments = zip(lon, lat, zone,\n [ellipsoid] * npos,\n [datum] * npos,\n [inverse] * npos)\n results = pool.map(__convert, arguments)\n results = np.asarray(results)\n eastings, northings = results[:, 0], results[:, 1]\n pool.close()\n elif npos > 1 and not parallel:\n eastings, northings = [], []\n for pos in zip(lon, lat, zone, [ellipsoid] * npos, [datum] * npos, [inverse] * npos):\n result = __convert(pos)\n eastings.append(result[0])\n northings.append(result[1])\n else:\n # The lon, lat and zone will all be lists here, For\n # cross-python2/python3 support, we can't just * them, so assume\n # the first value in the list is what we want.\n try:\n eastings, northings = __convert((lon[0], lat[0], zone[0], ellipsoid, datum, inverse))\n except IndexError:\n eastings, northings = __convert((lon, lat, zone, ellipsoid, datum, inverse))\n\n eastings = to_list(eastings)\n northings = to_list(northings)\n\n return np.asarray(eastings), np.asarray(northings), np.asarray(zone)", "def UTMZone(x,y):\n\n #take longitudinal coordinate and add 180, then divide by 6 and round up\n lon = int(np.ceil((x + 180)/6))\n \n #determine whether y is in the Northern or Southern Hemisphere\n if y > 0:\n code = 326\n else:\n code = 327\n \n #return epsg of the utm zone\n epsg = int(str(code)+str(lon))\n return epsg", "def lonlat_to_eastnorth (lon, lat):\n\t# TODO: allow for different 
resolution?\n\t# TODO: allow for different formating?\n\t\n\t## Preconditions & preparation:\n\tlon = radians (lon)\n\tlat = radians (lat)\n\t# see explanation above\n\ta, b = OSGB36.a, OSGB36.b\n\tF0 = OSGB36.F0 \n\te2 = OSGB36.e2 \n\tn, n2, n3 = OSGB36.n, OSGB36.n2, OSGB36.n3\n\tlon0 = ORIGIN_LON\n\tlat0 = ORIGIN_LAT\n\t\n\t## MAIN:\n\tcoslat = cos (lat)\n\tsinlat = sin (lat)\n\ttanlat = tan (lat)\n\n\tv = a * F0 * pow (1 - (e2 * sinlat**2), -0.5)\n\trho = a * F0 * (1 - e2) * pow (1 - e2 * sinlat**2, -1.5)\n\teta2 = (v / rho) - 1\n\n\tMa = (1 + n + (5/4)*n2 + (5/4)*n3) * (lat-lat0)\n\tMb = (3*n + 3*n2 + (21/8)*n3) * sin (lat-lat0) * cos (lat+lat0)\n\tMc = ((15/8)*n2 + (15/8)*n3) * sin (2*(lat-lat0)) * cos (2*(lat+lat0))\n\tMd = (35/24)*n3 * sin (3*(lat-lat0)) * cos (3*(lat+lat0))\n\tM = b * F0 * (Ma - Mb + Mc - Md)\n\n\tcos3lat = coslat**3\n\tcos5lat = coslat**5\n\ttan2lat = tanlat**2\n\ttan4lat = tan2lat**2\n\n\tI = M + ORIGIN_NORTHING\n\tII = (v/2)*sinlat*coslat\n\tIII = (v/24)*sinlat*cos3lat*(5-tan2lat+9*eta2)\n\tIIIA = (v/720)*sinlat*cos5lat*(61-58*tan2lat+tan4lat)\n\tIV = v*coslat\n\tV = (v/6)*cos3lat*(v/rho-tan2lat)\n\tVI = (v/120) * cos5lat * (5 - 18*tan2lat + tan4lat + 14*eta2 - 58*tan2lat*eta2)\n\tdelta_lon = lon-lon0\n\n\teast = ORIGIN_EASTING + IV*delta_lon + V*delta_lon**3 + VI*delta_lon**5\n\tnorth = I + II*delta_lon**2 + III*delta_lon**4 + IIIA*delta_lon**6\n\t\n\treturn east, north", "def calculate_UTM_crs(coords):\n if len(coords) == 2:\n longitude, latitude = coords\n elif len(coords) == 4:\n longitude = statistics.mean([coords[0], coords[2]])\n latitude = statistics.mean([coords[1], coords[3]])\n\n utm_zone = utm_getZone(longitude)\n\n utm_isNorthern(latitude)\n\n if utm_isNorthern(latitude):\n direction_indicator = \"+north\"\n else:\n direction_indicator = \"+south\"\n\n utm_crs = \"+proj=utm +zone={} {} +ellps=WGS84 +datum=WGS84 +units=m +no_defs\".format(utm_zone,\n direction_indicator)\n\n return utm_crs", "def zone2use(el_df):\n \n # First, check for any utm zones provided by the user in the emission location file\n utmzones_df = el_df[\"utmzone\"].loc[el_df[\"location_type\"] == \"U\"]\n \n if utmzones_df.shape[0] > 0:\n # there are some; find the smallest one\n utmzones_df['utmzone'] = utmzones_df.apply(lambda row: UTM.getZone(row))\n utmzones_df['utmband'] = utmzones_df.apply(lambda row: UTM.getBand(row))\n min_utmzu = int(np.nan_to_num(utmzones_df['utmzone']).min(axis=0))\n min_utmbu = utmzones_df['utmband'].min()\n else:\n min_utmzu = 0\n min_utmbu = \"Z\"\n\n # Next, compute utm zones from any user provided longitudes and find smallest\n lon_df = el_df[[\"lon\"]].loc[el_df[\"location_type\"] == \"L\"]\n if lon_df.shape[0] > 0:\n lon_df[\"z\"] = ((lon_df[\"lon\"]+180)/6 + 1).astype(int)\n min_utmzl = int(np.nan_to_num(lon_df[\"z\"]).min(axis=0))\n else:\n min_utmzl = 0\n \n lat_df = el_df[[\"lat\"]].loc[el_df[\"location_type\"] == \"L\"]\n if lat_df.shape[0] > 0 and lat_df[\"lat\"].min() < 0:\n min_utmbl = \"S\"\n else:\n min_utmbl = \"N\"\n\n if min_utmzu == 0:\n utmzone = min_utmzl\n else:\n if min_utmzl == 0:\n utmzone = min_utmzu\n else:\n utmzone = min(min_utmzu, min_utmzl)\n\n hemi = min(min_utmbu, min_utmbl)\n \n if utmzone == 0:\n emessage = \"Error! UTM zone is 0\"\n Logger.logMessage(emessage)\n raise Exception(emessage)\n if hemi == \"Z\":\n emessage = \"Error! 
Hemisphere of UTM zone could not be determined.\"\n Logger.logMessage(emessage)\n raise Exception(emessage)\n\n return utmzone, hemi", "def __test(inLat, inLong, inZone=False):\n e, n, z = utm_from_lonlat(inLong, inLat, inZone)\n lon, lat = lonlat_from_utm(e, n, z)\n\n return z, e, n, lon, lat", "def desiredENU2geo(self, x_L, y_L, z):\n\t\tx = cos(self.local_rot)*x_L - sin(self.local_rot)*y_L\n\t\ty = sin(self.local_rot)*x_L + cos(self.local_rot)*y_L\n\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\n\t\tlat, lon, alt = pm.enu2geodetic(x, y, z, lat0, lon0, self.h0)\n\t\treturn lat, lon, alt", "def get_lat_lon():\r\n\r\n # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv\")\r\n\r\n Latitude = temperatures['Latitude']\r\n Longitude = temperatures['Longitude']\r\n City = temperatures['City']\r\n Country = temperatures['Country']\r\n\r\n lat_array = []\r\n long_array = []\r\n cities_array = []\r\n countries_array = []\r\n tuples = []\r\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\r\n if (i, j) not in tuples:\r\n tuples.append((i, j))\r\n lat_array.append(float(i[:-1]))\r\n long_array.append(float(j[:-1]))\r\n cities_array.append(city)\r\n countries_array.append(country)\r\n\r\n return lat_array, long_array, cities_array, countries_array", "def GPSlatlon2XY(data_sheet, origin, theta):\n\n\tlon = np.array([[data_sheet.cell(row = i, column = 1).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat = np.array([[data_sheet.cell(row = i, column = 2).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_u = np.array([[data_sheet.cell(row = i, column = 5).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat_u = np.array([[data_sheet.cell(row = i, column = 6).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tUz = np.array([[data_sheet.cell(row = i, column = 4).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_in_km = (lon - origin[0])*111*np.cos(lat*np.pi/180)\n\tlat_in_km = (lat - origin[1])*111\n\t\n\trho_u = np.sqrt(np.power(lon_u,2) + np.power(lat_u,2))\n\ttheta_new_u = np.arctan2(lat_u,lon_u) - theta\n\n\trho = np.sqrt(np.power(lon_in_km,2) + np.power(lat_in_km,2))\n\ttheta_new = np.arctan2(lat_in_km,lon_in_km) - theta\n\n\tX, Y = rho*np.cos(theta_new), rho*np.sin(theta_new)\n\tUx, Uy = rho_u*np.cos(theta_new_u), rho_u*np.sin(theta_new_u)\n\n\treturn 1e3*X, 1e3*Y, 1e-3*Ux, 1e-3*Uy, 1e-3*Uz", "def british_national_grid_to_lonlat(eastings, northings):\n\n # E, N are the British national grid coordinates - eastings and northings\n\n # The Airy 180 semi-major and semi-minor axes used for OSGB36 (m)\n a, b = 6377563.396, 6356256.909\n # Scale factor on the central meridian\n F0 = 0.9996012717\n # Latitude of true origin (radians)\n lat0 = np.deg2rad(49)\n # Longtitude of true origin and central meridian (radians)\n lon0 = np.deg2rad(-2)\n # Northing & easting of true origin (m)\n N0, E0 = -100000, 400000\n # eccentricity squared\n e2 = 1 - (b*b)/(a*a)\n n = (a-b)/(a+b)\n\n # Iterate through the pairs of values in eastings and northings.\n lonlist, latlist = [], []\n for xy in zip(eastings, northings):\n\n E = xy[0]\n N = xy[1]\n\n # Initialise the iterative variables\n lat, M = lat0, 0\n\n while N - N0 - M >= 0.00001: # Accurate to 0.01mm\n lat = (N - N0 - M)/(a * 
F0) + lat\n M1 = (1 + n + (5./4) * n**2 + (5./4) * n**3) * (lat-lat0)\n M2 = (3*n + 3 * n**2 + (21./8)*n**3) * np.sin(lat-lat0) * \\\n np.cos(lat+lat0)\n M3 = ((15./8) * n**2 + (15./8)*n**3) * np.sin(2*(lat-lat0)) * \\\n np.cos(2 * (lat+lat0))\n M4 = (35./24)*n**3 * np.sin(3*(lat-lat0)) * np.cos(3*(lat+lat0))\n # meridional arc\n M = b * F0 * (M1 - M2 + M3 - M4)\n\n # transverse radius of curvature\n nu = a * F0 / np.sqrt(1-e2 * np.sin(lat)**2)\n\n # meridional radius of curvature\n rho = a * F0 * (1-e2) * (1-e2 * np.sin(lat)**2)**(-1.5)\n eta2 = nu / rho-1\n\n secLat = 1./np.cos(lat)\n VII = np.tan(lat) / (2 * rho * nu)\n VIII = np.tan(lat) / (24 * rho * nu**3) * (5 + 3 * np.tan(lat)**2 +\n eta2 - 9 * np.tan(lat)**2 * eta2)\n IX = np.tan(lat) / (720 * rho * nu**5) * (61 + 90 * np.tan(lat)**2 +\n 45 * np.tan(lat)**4)\n X = secLat / nu\n XI = secLat / (6 * nu**3) * (nu / rho + 2 * np.tan(lat)**2)\n XII = secLat / (120 * nu**5) * (5 + 28 * np.tan(lat)**2 + 24 *\n np.tan(lat)**4)\n XIIA = secLat / (5040 * nu**7) * (61 + 662 * np.tan(lat)**2 + 1320 *\n np.tan(lat)**4 + 720 * np.tan(lat)**6)\n dE = E-E0\n\n # These are on the wrong ellipsoid currently: Airy1830. (Denoted by _1)\n lat_1 = lat - VII * dE**2 + VIII * dE**4 - IX * dE**6\n lon_1 = lon0 + X * dE - XI * dE**3 + XII * dE**5 - XIIA * dE**7\n\n # Want to convert to the GRS80 ellipsoid.\n # First convert to cartesian from spherical polar coordinates\n H = 0 # Third spherical coord.\n x_1 = (nu / F0 + H) * np.cos(lat_1) * np.cos(lon_1)\n y_1 = (nu / F0 + H) * np.cos(lat_1) * np.sin(lon_1)\n z_1 = ((1-e2) * nu / F0 + H) * np.sin(lat_1)\n\n # Perform Helmut transform (to go between Airy 1830 (_1) and GRS80 (_2))\n s = -20.4894 * 10**-6 # The scale factor -1\n # The translations along x,y,z axes respectively\n tx, ty, tz = 446.448, -125.157, + 542.060\n # The rotations along x,y,z respectively, in seconds\n rxs, rys, rzs = 0.1502, 0.2470, 0.8421\n # And in radians\n rx = rxs * np.pi / (180 * 3600.)\n ry = rys * np.pi / (180 * 3600.)\n rz = rzs * np.pi / (180 * 3600.)\n\n x_2 = tx + (1 + s) * x_1 + (-rz) * y_1 + (ry) * z_1\n y_2 = ty + (rz) * x_1 + (1 + s) * y_1 + (-rx) * z_1\n z_2 = tz + (-ry) * x_1 + (rx) * y_1 + (1 + s) * z_1\n\n # Back to spherical polar coordinates from cartesian\n # Need some of the characteristics of the new ellipsoid\n\n # The GSR80 semi-major and semi-minor axes used for WGS84(m)\n a_2, b_2 = 6378137.000, 6356752.3141\n # The eccentricity of the GRS80 ellipsoid\n e2_2 = 1 - (b_2 * b_2) / (a_2 * a_2)\n p = np.sqrt(x_2**2 + y_2**2)\n\n # Lat is obtained by an iterative proceedure:\n lat = np.arctan2(z_2, (p * (1-e2_2))) # Initial value\n latold = 2 * np.pi\n while abs(lat - latold) > 10**-16:\n lat, latold = latold, lat\n nu_2 = a_2 / np.sqrt(1-e2_2 * np.sin(latold)**2)\n lat = np.arctan2(z_2 + e2_2 * nu_2 * np.sin(latold), p)\n\n # Lon and height are then pretty easy\n lon = np.arctan2(y_2, x_2)\n H = p / np.cos(lat) - nu_2\n\n # Convert to degrees\n latlist.append(np.rad2deg(lat))\n lonlist.append(np.rad2deg(lon))\n\n # Convert to NumPy arrays.\n lon = np.asarray(lonlist)\n lat = np.asarray(latlist)\n\n # Job's a good'n.\n return lon, lat", "def convert(coords):\n lat = coords[:4]\n lon = coords[4:]\n\n lat = lat[:2] + \".\" + lat[2:]\n\n if int(lon[0]) > 5:\n lon = \"-\" + lon[:2] + \".\" + lon[2:]\n else:\n lon = \"-1\" + lon[:2] + \".\" + lon[2:]\n\n return (float(lat), float(lon))", "def zonecoord_to_eastnorth (coord_str):\n\t## Preconditions:\n\tlen_str = len (coord_str)\n\tassert (len_str % 2 == 0)\n\t## 
Main:\n\t# calc size & resolution of numeric portion, split\n\trez = len_str / 2\n\tosgb_easting = coord_str[:rez]\n\tosgb_northing = coord_str[rez:]\n\t# what is each digit (in metres)\n\trez_unit = 10.0**(5-rez)\n\treturn int (osgb_easting) * rez_unit, int (osgb_northing) * rez_unit", "def nad83(self, coordinates, reverse=False):\n payload = {\n 'format': 'json',\n 's_srs': 4326,\n 't_srs': 3401,\n 'x': coordinates[1],\n 'y': coordinates[0]\n }\n if reverse:\n payload['s_srs'], payload['t_srs'] = payload['t_srs'], payload['s_srs']\n payload['x'], payload['y'] = payload['y'], payload['x']\n\n url = 'http://epsg.io/trans'\n r = requests.get(url, params=payload)\n try:\n data = r.json()\n except ValueError:\n data = dict(y='0', x='0')\n\n value = (float(data['y']), float(data['x']))\n return value", "def geo_transform(self):\n pass", "def lat_lons(self):", "def row_to_lat(self, row):\n return self.max_absolute_lat - row * self.lat_step", "def get_latlon():\n\t\n iss.compute() # Get the lat/long values from ephem\n long_value = [float(i) for i in str(iss.sublong).split(\":\")]\n if long_value[0] < 0:\n long_value[0] = abs(long_value[0])\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"W\"\n else:\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"E\"\n cam.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)\n lat_value = [float(i) for i in str(iss.sublat).split(\":\")]\n if lat_value[0] < 0:\n lat_value[0] = abs(lat_value[0])\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"S\"\n else:\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"N\"\n cam.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)\n return (iss.sublat / degree, iss.sublong / degree)", "def process_coords():\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed", "def geoConvertTBXY(center, tbxy):\n\tif tbxy != \"\":\n\t\ttbxy_parts = tbxy.partition(\":\")\n\n\t\tif center == \"STCC\":\n\t\t\treturn db.GeoPt(\n\t\t\t\tlat = float(tbxy_parts[2]) * 0.00000274 +\t 33.172,\n\t\t\t\tlon = float(tbxy_parts[0]) * 0.0000035 - 144.966\n\t\t\t)\n\t\telif center == \"SLCC\":\n\t\t\treturn db.GeoPt(\n\t\t\t\tlat = float(tbxy_parts[2]) * 0.00000275 +\t 30.054,\n\t\t\t\tlon = float(tbxy_parts[0]) * 0.00000329 - 126.589\n\t\t\t)\n\t\telif center == \"FRCC\":\n\t\t\treturn db.GeoPt(\n\t\t\t\tlat = float(tbxy_parts[2]) * 0.00000274 +\t 30.84,\n\t\t\t\tlon = float(tbxy_parts[0]) * 0.00000335 - 141\n\t\t\t)\n\n\treturn None", "def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * 
\\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p ** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y", "def parse_lon_lat(grid, lon, lat):\n if lat is None:\n lat = grid.origin_latitude[\"data\"][0]\n if lon is None:\n lon = grid.origin_longitude[\"data\"][0]\n return lon, lat", "def get_location(self):\n # h = b'\\r\\nAT-MSGEO\\r\\r\\n-MSGEO: -3936,3464,-3612,7402d50c\\r\\n\\r\\n'\n # an example of the string returned from the AT-MSGEO used for testing.\n h = self.acquire_response(b'AT-MSGEO')\n if isinstance(h, bytes):\n h = h.decode('utf-8')\n h = h.strip()\n h = h.split(':')\n h = h[1].split(',')\n x = int(h[0])*1000 # Convert coordinates to meters.\n y = int(h[1])*1000\n z = int(h[2])*1000\n else:\n print('Location not available')\n\n # 'geocent' refers to the geo-centered frame that the co-ordinates are returned in\n inProj = Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n\n # 'latlong' is the frame to be converted to\n outProj = Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n\n # Convert X, Y, Z to latitude, longitude and altitude\n long, lat, alt = transform(inProj, outProj, x, y, z, radians=False)\n # l = [str(long), str(lat), str(alt)]\n return long, lat, alt", "def GPSlatlon2XY_time(lat_u, lon_u, theta):\n\n\trho_u = np.sqrt(np.power(lon_u, 2) + np.power(lat_u, 2))\n\ttheta_new_u = np.arctan2(lat_u, lon_u) - theta\n\n\tUx, Uy = rho_u * np.cos(theta_new_u), rho_u * np.sin(theta_new_u)\n\n\treturn Ux, Uy", "def project_normalize_coordinates(node_feats, transformer=None, crs=None):\n # get home node:\n home_node = node_feats.iloc[\n (node_feats[\"in_degree\"] + node_feats[\"out_degree\"]).argmax()\n ]\n home_center = home_node[\"center\"]\n\n @to_series\n def get_projected_displacement(x, y, home_center):\n if (x_min < x < x_max) and (y_min < y < y_max):\n proj_x, proj_y = transformer.transform(x, y)\n home_x, home_y = transformer.transform(home_center.x, home_center.y)\n return (proj_x - home_x, proj_y - home_y)\n else: # fall back to haversine\n return get_haversine_displacement.__wrapped__(x, y, home_center)\n\n if transformer is not None:\n # get bounds\n x_min, y_min, x_max, y_max = crs.area_of_use.bounds\n normed_coords = node_feats[\"center\"].apply(\n get_projected_displacement, args=[home_center]\n )\n else:\n normed_coords = node_feats[\"center\"].apply(\n get_haversine_displacement, args=[home_center]\n )\n\n # add_distance\n normed_coords[\"distance\"] = normed_coords.apply(\n lambda x: np.sqrt(x[0] ** 2 + x[1] ** 2), axis=1\n )\n # TODO: add as a TEST! 
compare haversine dist to fake-projected coordinates\n # print(normed_coords[\"distance\"])\n # test_distance = node_feats[\"center\"].apply(\n # lambda point: ti.geogr.point_distances.haversine_dist(\n # point.x, point.y, home_center.x, home_center.y\n # )[0]\n # )\n # print(\n # pd.merge(\n # normed_coords, test_distance, left_index=True, right_index=True\n # )\n # )\n\n return pd.merge(\n node_feats, normed_coords, left_index=True, right_index=True\n )", "def get_coordinates(table, replace_columns=False, remove_nans=False):\n assert \"zip code\" in table.labels or ((\"city\" in table.labels or \"county\" in table.labels) and \"state\" in table.labels)\n ref = Table.read_table(pkg_resources.resource_filename(__name__, \"geodata/geocode_states.csv\"))\n\n index_name = \"\".join(table.labels) # Ensures that index can't possibly be one of the preexisting columns\n index_name += \" \"\n \n table = table.with_columns(index_name, np.arange(table.num_rows))\n lat = np.array([np.nan] * table.num_rows)\n lon = np.array([np.nan] * table.num_rows)\n unassigned = set(range(table.num_rows)) \n while len(unassigned) > 0:\n index = unassigned.pop()\n row = table.take(index).take(0)\n if \"zip code\" in table.labels:\n select = table.where(\"zip code\", row[\"zip code\"][0]).column(index_name)\n unassigned -= set(select)\n try:\n ref_lat, ref_lon = ref.where(\"zip\", int(row[\"zip code\"][0])).select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n else:\n state_select = table.where(\"state\", row[\"state\"][0]).column(index_name)\n county_select = table.where(\"county\", row[\"county\"][0]).column(index_name) if \"county\" in table.labels else np.arange(table.num_rows)\n city_select = table.where(\"city\", row[\"city\"][0]).column(index_name) if \"city\" in table.labels else np.arange(table.num_rows)\n select = set.intersection(set(state_select), set(county_select), set(city_select))\n unassigned -= select\n select = list(select)\n try:\n matched_ref = ref.where(\"state\", row[\"state\"][0])\n if \"county\" in table.labels:\n matched_ref = matched_ref.where(\"county\", row[\"county\"][0].lower())\n if \"city\" in table.labels:\n matched_ref = matched_ref.where(\"city\", row[\"city\"][0].lower())\n ref_lat, ref_lon = matched_ref.select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n table = table.with_columns(\"lat\", lat, \"lon\", lon)\n table = table.drop(index_name)\n if replace_columns:\n for label in [\"county\", \"city\", \"zip code\", \"state\"]:\n try:\n table = table.drop(label)\n except KeyError:\n pass\n if remove_nans: \n table = table.where(\"lat\", are.below(float(\"inf\"))) # NaNs are not considered to be smaller than infinity\n return table", "def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def convertLatLon(latCell, lonCell):\n cell_lats = np.array([])\n cell_lons = np.array([])\n for lat in latCell:\n cell_lats = 
np.append(cell_lats, lat * (180 / np.pi)) \n for lon in lonCell:\n cell_lons = np.append(cell_lons, lon * (180 / np.pi)) \n\n return cell_lats, cell_lons", "def location_input():\n \n # location for OPV greenhouse at UA CEAC in Tucson, AZ\n \n latitude = 32.28 # OPV greenhouse latitude (deg)\n longitude = -110.94 # OPV greenhouse longitude (deg)\n timezone = -7 # Tucson, AZ timezone (UTC)\n elevation = 718 # OPV greenhouse elevation (m)\n \n return latitude, longitude, timezone, elevation", "def convertCoord(lon, lat, inEPSG, outEPSG):\n from pyproj import Proj, transform\n inProj = Proj(init='epsg:'+str(inEPSG))\n outProj = Proj(init='epsg:'+str(outEPSG))\n x, y = transform(inProj, outProj, lon, lat)\n return x, y\n # epsg:4326 WGS84\n # epsg:2950 MTM8\n # epsg:6622 Quebec Lambert", "def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, geometry)", "def coords_from_cell(cell, lon = [-8.73, -8.50], lat = [41.10, 41.25], N = 100, M = 75):\n lon_step = (lon[1] - lon[0]) / N \n lat_step = (lat[1] - lat[0]) / M\n \n middle_lon = lon[0] + cell[0] * lon_step + lon_step / 2\n middle_lat = lat[0] + cell[1] * lat_step + lat_step / 2\n \n return [middle_lon, middle_lat]", "def coords_to_gps(self,coords):\n return ((self.max_lat - (self.lat_step * (0.5+coords[0]))),(self.min_lon + (self.lon_step * (0.5+coords[1]))))", "def gen_gps_to_coords(lat,lon,rows,cols,min_lat,max_lat,min_lon,max_lon):\n\n if (lat <= min_lat or lat >= max_lat or lon <= min_lon or lon >= max_lon):\n return (-1,-1)\n\n lat_step = abs(max_lat-min_lat)/rows\n lon_step = abs(max_lon-min_lon)/cols\n\n lat_spot = int((max_lat-lat)/lat_step)\n lon_spot = int((lon-min_lon)/lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def map_coord_transformer(df, proj_string, lat_column_name, long_column_name):\n logging.info('Generating coordinate reference systems... 
')\n #generate coordinate reference system objects for details of how this works \n from_crs = pyproj.CRS.from_string(proj_string)\n from_proj = pyproj.Proj(from_crs)\n gps_proj = pyproj.Proj('epsg:4326')\n original_coordinates_to_latlong_obj = pyproj.Transformer.from_proj(from_proj, gps_proj)\n logging.info('Defining transformation functions...')\n def original_coordinates_to_latlong(adf):\n (lat,long) = original_coordinates_to_latlong_obj.transform(adf[lat_column_name], adf[long_column_name])\n return lat, long\n \n #apply converter to generate series\n logging.info('Converting coordinates...')\n latlong_series = df.apply(original_coordinates_to_latlong, axis=1)\n \n #get calculated values and put back into df.\n logging.info('Splitting series...')\n lat_series = latlong_series.copy().apply(lambda x: x[0])\n long_series = latlong_series.copy().apply(lambda x: x[1])\n \n #return the values as \n logging.info('Preparing to return calc_lat and calc_long...')\n df.loc[:,'calc_lat'] = lat_series.copy()\n df.loc[:,'calc_long'] = long_series.copy()\n \n return df", "def split_stn_coords(U, stn):\n if stn == 101:\n return U[9], U[10], U[11], 9\n elif stn == 337:\n return U[12], U[13], U[14], 12\n elif stn == 394:\n return U[15], U[16], U[17], 15", "def _normalize_location(lat: float, lon: float):\n latitude = \"{0:.3f}\".format(round(lat, 3))\n longitude = \"{0:.3f}\".format(round(lon, 3))\n return latitude + \":\" + longitude", "def ll2xyz(lon_pt,lat_pt):\n\n xPt = np.cos(lat_pt) * np.cos(lon_pt)\n yPt = np.cos(lat_pt) * np.sin(lon_pt)\n zPt = np.sin(lat_pt)\n return [xPt,yPt,zPt]", "def to_lnglat(r):\n \n x, y = r['x_coordinate'], r['y_coordinate']\n if np.isnan(x) or np.isnan(y):\n return None, None\n return projection(x, y, inverse = True)", "def parsenwspt(text):\n lat = int(text[0:4]) / 100\n lon = int(text[4:])\n if lon < 1000:\n lon += 10000\n return (lon / -100, lat)", "def calcLatLon(northing, easting):\n from math import asin, atan2, cos, log, pow, sin, sqrt\n\n # CONSUS Albers variables (EPSG: 5070)\n RE_NAD83 = 6378137.0\n E_NAD83 = 0.0818187034 # Eccentricity\n D2R = 0.01745329251 # Pi/180\n standardParallel1 = 43.\n standardParallel2 = 47.\n centralMeridian = -114.\n originLat = 30\n originLon = 0\n\n m1 = cos(standardParallel1 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel1 * D2R)), 2.0))\n m2 = cos(standardParallel2 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel2 * D2R)), 2.0))\n\n def calcPhi(i):\n sinPhi = sin(i * D2R)\n return (1.0 - pow(E_NAD83, 2.0)) * \\\n ((sinPhi/(1.0 - pow((E_NAD83 * sinPhi), 2.0))) -\n 1.0/(2.0 * E_NAD83) *\n log((1.0 - E_NAD83 * sinPhi)/(1.0 + E_NAD83 * sinPhi)))\n\n q0 = calcPhi(originLat)\n q1 = calcPhi(standardParallel1)\n q2 = calcPhi(standardParallel2)\n nc = (pow(m1, 2.0) - pow(m2, 2.0)) / (q2 - q1)\n C = pow(m1, 2.0) + nc * q1\n rho0 = RE_NAD83 * sqrt(C - nc * q0) / nc\n rho = sqrt(pow(easting, 2.0) + pow((rho0 - northing), 2.0))\n q = (C - pow((rho * nc / RE_NAD83), 2.0)) / nc\n beta = asin(q / (1.0 - log((1.0 - E_NAD83) / (1.0 + E_NAD83)) *\n (1.0 - pow(E_NAD83, 2.0))/(2.0 * E_NAD83)))\n a = 1.0 / 3.0 * pow(E_NAD83, 2.0) + 31.0 / 180.0 * \\\n pow(E_NAD83, 4.0) + 517.0 / 5040.0 * pow(E_NAD83, 6.0)\n b = 23.0/360.0 * pow(E_NAD83, 4.0) + 251.0 / 3780.0 * pow(E_NAD83, 6.0)\n c = 761.0/45360.0 * pow(E_NAD83, 6.0)\n theta = atan2(easting, (rho0 - northing))\n\n lat = (beta + a * sin(2.0 * beta) + b * sin(4.0 * beta) +\n c * sin(6.0 * beta))/D2R\n lon = centralMeridian + (theta / D2R) / nc\n coords = [lat, lon]\n\n return 
coords", "def find_stn(ref_lon, ref_lat, tlon, tlat):\n\n # find the indices of the 4 model grid points around the location\n Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)\n\n # get the 4 model grid points longitudes and latitudes\n lonlist = []\n latlist = []\n for i in Ilist:\n for j in Jlist:\n lonlist.append(tlon[i,j])\n latlist.append(tlat[i,j])\n\n # convert Python lists to numpy arrays\n lonlist = N.array(lonlist)\n latlist = N.array(latlist)\n\n return lonlist, latlist", "def noaa_station_data_from_row(row):\n if len(row) < 12:\n return None\n latitude = parselatlon_noaa(row[7], 'N', 'S')\n if latitude is None:\n return None\n longitude = parselatlon_noaa(row[8], 'E', 'W')\n if longitude is None:\n return None\n\n height = _int_from_string(row[11])\n return {LATITUDE:latitude, LONGITUDE:longitude, ELEVATION:height}", "def latlons(self):\n\t\t\n\t\t# First check we have a grid feature type\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\n\t\t\tlatvar = self.latitude_variable\n\t\t\tlonvar = self.longitude_variable\n\n\t\t\tlatdims = self.coordinates_mapping['latitude']['map']\n\t\t\tlondims = self.coordinates_mapping['longitude']['map']\n\n\t\t\t# Create latitude and longitude subset slices from the field subset slices\n\t\t\tlat_subset = []\n\t\t\tfor dim in latdims:\n\t\t\t\tlat_subset.append(self._subset[dim])\n\t\t\t\n\t\t\tlon_subset = []\n\t\t\tfor dim in londims:\n\t\t\t\tlon_subset.append(self._subset[dim])\n\n\t\t\t# Then check if latitude and longitude variables are 1D\n\t\t\tif len(latvar.shape) == 1 and len(lonvar.shape) == 1:\n\t\t\t\tlatvar_2d = latvar[lat_subset].reshape((-1,1)).repeat(lonvar.shape[0], axis=1)\n\t\t\t\tlonvar_2d = lonvar[lon_subset].reshape((-1,1)).transpose().repeat(latvar.shape[0], axis=0)\n\t\t\t\treturn (latvar_2d, lonvar_2d)\n\t\t\t\n\t\t\t# for 2D variables its easy, just return the variable data\n\t\t\telif len(latvar.shape) >= 2 and len(lonvar.shape) >= 2:\n\t\t\t\t\n\t\t\t\t# Handle the WRF case where lat/lon variables are 3D with time as first dimension\n\t\t\t\tif len(latvar.shape) == 3 and len(lonvar.shape) == 3:\n\t\t\t\t\treturn (latvar[0,lat_subset], lonvar[0,lon_subset])\n\t\t\t\telse:\n\t\t\t\t\treturn (latvar[lat_subset], lonvar[lon_subset])\n\t\t\t\n\t\t\t# otherwise, we can't do it!\n\t\t\telse:\n\t\t\t\treturn (None, None)\n\t\t\n\t\telif self.featuretype == 'PointSeries':\n\t\t\treturn (self.latitude_variable[:], self.longitude_variable[:])", "def makeUpCoords(numb):\n # bounds of UK in EPSG:4326\n minLat=49.96\n maxLat=60.84\n minLon=-7.5\n maxLon=1.78\n # generate array of random numbers\n lon=np.random.rand(numb)*(maxLon-minLon)+minLon\n lat=np.random.rand(numb)*(maxLat-minLat)+minLat\n return(lon,lat)", "def oszone_to_eastnorth (ossquare):\n\t## Preconditions:\n\tassert (len (ossquare) == 2)\n\t## Main:\n\t# find the 500km square\n\tmainsq = ossquare[0]\n\tif (mainsq is 'S'):\n\t\tx, y = 0, 0\n\telif (mainsq is 'T'):\n\t\tx, y = 1, 0\n\telif (mainsq is 'N'):\n\t\tx, y = 0, 1\n\telif (mainsq is 'O'):\n\t\tx, y = 1, 1\n\telif (mainsq is 'H'):\n\t\tx, y = 0, 2\n\telif (mainsq is 'J'):\n\t\tx, y = 1, 2\n\telse:\n\t\tassert (False), \"'%s' is not an OSGB 500km square\" % mainsq\n\teasting = x * 500\n\tnorthing = y * 500\n\t\n\t# find the 100km offset & add\n\tgrid = \"VWXYZQRSTULMNOPFGHJKABCDE\"\n\tminorsq = mainsq = ossquare[1]\n\tassert (minorsq in grid), \"'%s' is not an OSGB 100km square\" % minorsq\n\tposn = grid.find (minorsq)\n\teasting += (posn % 5) * 100\n\tnorthing += (posn / 5) * 100\n\treturn easting 
* 1000, northing * 1000", "def csv_azmdist2latlon(self):\n global input_data\n err_msg = ''\n # Create temporary vector layer to store oytput points\n v_lyr_name = tmp_layer_name() # Get layer name\n self.create_tmp_layer(v_lyr_name)\n # Set layer to active\n v_lyr = QgsVectorLayer('Point?crs=epsg:4326', v_lyr_name, 'memory')\n v_lyr = self.iface.activeLayer()\n # Enabale edititing\n v_lyr.startEditing()\n v_prov = v_lyr.dataProvider()\n feat = QgsFeature()\n \n out_csv_field_names = ['P_NAME', 'AZM_BRNG', 'DIST', 'LAT_DMS', 'LON_DMS', 'NOTES']\n with open(input_data.f_in, 'r') as in_csv:\n with open(input_data.f_out, 'w') as out_csv:\n reader = csv.DictReader(in_csv, delimiter = ';')\n writer = csv.DictWriter(out_csv, fieldnames = out_csv_field_names, delimiter = ';')\n for row in reader:\n try: # Try to read line according to field names\n azm_dist_valid, err_msg = validate_azm_dist(row['AZM_BRNG'], row['DIST'])\n if azm_dist_valid: # azimuth or brng and distance are valid\n # Correct azimuth by magnetic variation\n azm = float(row['AZM_BRNG']) + input_data.r_mag\n if azm < 0:\n azm += 360\n elif azm > 360:\n azm -= 360\n # Calculate second point latitude and longitude in decimal degress \n ep_lat_dd, ep_lon_dd = vincenty_direct_solution(input_data.r_lat, input_data.r_lon, azm, float(row['DIST']), WGS84_A, WGS84_B, WGS84_F)\n # Convert to DMS format with hemisphere indicator as prefix\n ep_lat_dms = dd2dms_shdp(ep_lat_dd, C_LAT)\n ep_lon_dms = dd2dms_shdp(ep_lon_dd, C_LON)\n # Write result to output file\n writer.writerow({'P_NAME': row['P_NAME'],\n 'AZM_BRNG': row['AZM_BRNG'],\n 'DIST': row['DIST'],\n 'LAT_DMS' : ep_lat_dms,\n 'LON_DMS' : ep_lon_dms,\n 'NOTES' : ''})\n # Write result to temporary layer\n end_point = QgsPoint(ep_lon_dd, ep_lat_dd)\n feat.setGeometry(QgsGeometry.fromPoint(end_point))\n feat.setAttributes([0, row['P_NAME'], ep_lat_dms, ep_lon_dms])\n v_prov.addFeatures([feat])\n v_lyr.commitChanges()\n else: # azimuth or brng or distance is not valid write err_msg to NOTES\n writer.writerow({'P_NAME': row['P_NAME'],\n 'AZM_BRNG': row['AZM_BRNG'],\n 'DIST': row['DIST'],\n 'LAT_DMS' : '',\n 'LON_DMS' : '',\n 'NOTES' : err_msg})\n except: # Row of csv does not match to field names in header\n writer.writerow({'P_NAME': row['P_NAME'],\n 'AZM_BRNG': row['AZM_BRNG'],\n 'DIST': row['DIST'],\n 'LAT_DMS' : '',\n 'LON_DMS' : '',\n 'NOTES' : 'Wrong CSV line'})\n \n v_lyr.updateExtents() \n return", "def project_xy_to_latlng(x, y):\n if x and y: # neither are blank\n d = {}\n latlng = NYSP1983_PROJ(int(x), int(y), inverse=True)\n d['longitude'], d['latitude'] = [round(c, 5) for c in latlng] # round em\n return d\n else:\n return {'longitude': None, 'latitude': None}", "def get_lat_long(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n postcode_df = self.postcode_df\n postcode_df = postcode_df.fillna('np.nan')\n postcode_df = postcode_df.set_index('Postcode')\n index_data = postcode_df.loc[postcodes]\n lat = np.array(index_data['Latitude']).T\n lng = np.array(index_data['Longitude']).T\n\n return np.vstack((lat, lng)).transpose()", "def northing(self):\r\n x, y = self.lonlat2xy(self.longitude, self.latitude)\r\n return y", "def add_latlon(df):\n LLs = [num2deg(x,y,z) for x,y,z in zip(df['x'],df['y'],df['z'])]\n LLdf = pd.DataFrame.from_records(LLs,columns = ['latitude','longitude'])\n return pd.concat([df.reset_index(drop=True),LLdf],axis = 1)", "def get_ecmwf_lat_lon(nc_file):\n from netCDF4 import Dataset\n \n fh = Dataset(nc_file, mode='r')\n\n 
latitude_ecmwf = fh.variables['latitude_ecmwf'][:]\n longitude_ecmwf = fh.variables['longitude_ecmwf'][:]\n\n lonmesh_ecmwf,latmesh_ecmwf = np.meshgrid(longitude_ecmwf,latitude_ecmwf)\n\n print('latitude_ecmwf: ', latitude_ecmwf.shape)\n print('longitude_ecmwf: ', longitude_ecmwf.shape)\n \n return latitude_ecmwf, longitude_ecmwf, latmesh_ecmwf, lonmesh_ecmwf;", "def _getlats(self):\n lats = 90. - np.degrees(self.zeros)\n return lats", "def gps_to_coords(self,lat,lon):\n\n if (lat <= self.min_lat or lat >= self.max_lat or lon <= self.min_lon or lon >= self.max_lon):\n return (-1,-1)\n\n lat_spot = int((self.max_lat-lat)/self.lat_step)\n lon_spot = int((lon-self.min_lon)/self.lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def wgs84_to_mercator(df, lon, lat):\n k = 6378137\n df[\"x\"] = df[lon] * (k * np.pi/180.0)\n df[\"y\"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k\n return df", "def cartesian_to_geographical(coordinate_triples):\n if len(coordinate_triples.shape) == 1:\n x = coordinate_triples[0]\n y = coordinate_triples[1]\n z = coordinate_triples[2]\n elif len(coordinate_triples.shape) == 2:\n assert coordinate_triples.shape[1] == 3\n x = coordinate_triples[:, 0]\n y = coordinate_triples[:, 1]\n z = coordinate_triples[:, 2]\n radius = np.sqrt(x**2 + y**2 + z**2)\n longitudes = np.arctan2(y, x)\n latitudes = np.arcsin(z/radius)\n return (latitudes, longitudes)", "def _getXYZ ( lon, lat ):\n d2r = pi / 180.\n rlon, rlat = ( d2r * lon, d2r * lat )\n x = cos(rlat) * cos(rlon)\n y = cos(rlat) * sin(rlon)\n z = sin(rlat)\n return (x,y,z)", "def get_coordinates(addresses, boroughs):\n latitude = []\n longitude = []\n for address, borough in zip(addresses, boroughs):\n try:\n g = geocoder.osm('{}, {}, New York'.format(address, borough)).json\n latitude.append(g['lat'])\n longitude.append(g['lng'])\n except:\n latitude.append(None)\n longitude.append(None)\n\n return np.array(latitude).T, np.array(longitude).T", "def convert_to_utm(self, lat, lon):\n\t\ttry:\n\t\t\treturn utm.from_latlon(lat, lon)\n\t\texcept e as Exception:\n\t\t\tprint(\"Error converting lat/lon to utm: {}\".format(e))\n\t\t\treturn None", "def transform_utm_to_wgs(row):\n return pd.Series(pyproj.transform(utm12n, wgs84, row['xutm'], row['yutm']))", "def _get_latitude(self, latitude, hemisphere):\n if not isinstance(latitude, float):\n latitude = float(latitude)\n if hemisphere.lower() == \"n\":\n return latitude\n if hemisphere.lower() == \"s\":\n return -1 * latitude", "def calcStationCoords(station, gridSquares):\n\n # calculate coordinates and precision\n gridRef = station[\"gridReference\"]\n gridCode = gridRef[:2]\n station[\"precision\"] = 10 ** (5 - len(gridRef[2:])/2) # Units: meters\n station[\"easting\"] = (\n gridSquares[gridCode][0] + int(gridRef[2:len(gridRef[2:])/2 + 2]) *\n station[\"precision\"]\n )\n station[\"northing\"] = (\n gridSquares[gridCode][1] + int(gridRef[len(gridRef[2:])/2 + 2:]) *\n station[\"precision\"]\n )\n\n return station", "def geo2cell(geofile, posfile):", "def normalize_simple(line):\n first = find_next_comma_newline(line,0)\n #print \"first: %d\" % first\n second = find_next_comma_newline(line,first+1)\n #print \"second: %d\" % second\n third = find_next_comma_newline(line,second+1)\n #print \"third: %d\" % third\n if third == -1:\n lon = float(line[second+1:])\n else:\n lon = float(line[second+1:third])\n return int(line[0:first]),float(line[first+1:second]),lon", "def convertView2Geo(self, x, 
y):\n\n # x_pix is from left map edge, y_pix from top map edge\n x_pix = x + self.view_offset_x\n y_pix = y + self.view_offset_y\n\n lon = self.map_llon + x_pix/self.ppd_x\n lat = self.map_tlat - y_pix/self.ppd_y\n\n return (lon, lat)", "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly", "def calcPosition (lat, lon):\n nauticalMilePerLat = 60.00721\n nauticalMilePerLongitude = 60.10793\n rad = math.pi / 180.0\n milesPerNauticalMile = 1.15078\n \n y = lat * nauticalMilePerLat\n x = math.cos(lat * rad) * lon * nauticalMilePerLongitude\n\n return x * milesPerNauticalMile * 1609.344, y * milesPerNauticalMile * 1609.344", "def get_near_cities_from_user_coordinates(user_coordinates):\n data = pandas.read_csv('city_coordinates.tsv', sep='\\t')\n cities = data['city_ascii']\n latitudes, longitudes = data['lat'], data['lng']\n distance_list = []\n for city, lat, lng in zip(cities, latitudes, longitudes):\n try:\n distance = geodesic((lat, lng), user_coordinates).km\n distance_list.append(((lat, lng), city, distance))\n except Exception:\n continue\n distance_list_sorted = sorted(distance_list, key=lambda x: x[-1])\n return [elem[-2] for elem in distance_list_sorted[:100]]", "def lunar_longitude(cls, tee):\n return cls.true_position(tee, cls.SIDEREAL_MONTH, 32/360, cls.ANOMALISTIC_MONTH, 1/96)", "def geo2desiredENU(self, lat, lon, h):\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\t\tx,y,z = pm.geodetic2enu(lat, lon, h, lat0, lon0, self.h0)\n\n\t\tx_L = cos(self.local_rot)*x + sin(self.local_rot)*y\n\t\ty_L = -1*sin(self.local_rot)*x + cos(self.local_rot)*y\n\n\t\tz = self.curr_z_enu - self.GND_ALT\n\t\treturn x_L, y_L, z", "def nancay():\n return coord.EarthLocation(lat=47.376511*u.deg, lon=2.1924002*u.deg)", "def fn2lonlat(filename):\n tokens = filename.split(\"/\")[-1].rsplit(\".\", 1)[0].split(\"x\")\n return [0 - float(tokens[0]), float(tokens[1])]", "def transform_coordinates(coords):\n # WGS 84 reference coordinate system parameters\n A = 6378.137 # major axis [km]\n E2 = 6.69437999014e-3 # eccentricity squared\n\n coords = prepare_coords(coords)\n\n # convert to radiants\n lat_rad = np.radians(coords[:, 0])\n lon_rad = np.radians(coords[:, 1])\n\n # convert to cartesian coordinates\n r_n = A / (np.sqrt(1 - E2 * (np.sin(lat_rad) ** 2)))\n x = r_n * np.cos(lat_rad) * np.cos(lon_rad)\n y = r_n * np.cos(lat_rad) * 
np.sin(lon_rad)\n z = r_n * (1 - E2) * np.sin(lat_rad)\n\n return np.column_stack((x, y, z))", "def lnglat_to_meters(longitude, latitude):\n if isinstance(longitude, (list, tuple)):\n longitude = numpy.array(longitude)\n if isinstance(latitude, (list, tuple)):\n latitude = numpy.array(latitude)\n\n origin_shift = numpy.pi * 6378137\n easting = longitude * origin_shift / 180.0\n northing = numpy.log(numpy.tan((90 + latitude) * numpy.pi / 360.0)) * origin_shift / numpy.pi\n return (easting, northing)", "def get_lonlat(self, row, col):\n\n if self.ndim != 2:\n raise DimensionError(('operation undefined '\n 'for %sD geometry ') % self.ndim)\n elif self.lons is None or self.lats is None:\n raise ValueError('lon/lat values are not defined')\n return self.lons[row, col], self.lats[row, col]", "def _fixupPosition(self, position):\n if \"latitudeI\" in position:\n position[\"latitude\"] = position[\"latitudeI\"] * 1e-7\n if \"longitudeI\" in position:\n position[\"longitude\"] = position[\"longitudeI\"] * 1e-7", "def get_latlon_point(self, row, col):\n p1 = Proj(self.src.crs)\n window = rasterio.windows.Window(col, row, 1, 1)\n trnsfrm = self.src.window_transform(window)\n T1 = trnsfrm * Affine.translation(0.5, 0.5)\n p2 = Proj(proj='latlong', datum='WGS84')\n x, y = self.src.xy(row, col)\n lon, lat = transform(p1, p2, x, y)\n return lat, lon", "def CreateTargetGeoMap(latS, latN, lonW, lonE, latlen, lonlen):\n\n lat_grid = np.linspace(latS, latN, latlen)\n lon_grid = np.linspace(lonW, lonE, lonlen)\n\n return lat_grid,lon_grid", "def get_rain_grid_coords(directory=\"rain_grid_coordinates\"):\n lon, lat = [pd.DataFrame([re.findall('..\\......', row[0]) for idx,\n row in pd.read_table(sys.path[0]+f\"/{directory}/{file}_center.txt\",\n header=None).iterrows()]) for file in ['lambda', 'phi']]\n coords = pd.DataFrame(columns={\"LAT\", \"LON\"})\n coords[\"LAT\"] = np.round(pd.Series([item for sublist in lat.values.tolist() for item in sublist]).astype(float), 4)\n coords[\"LON\"] = np.round(pd.Series([item for sublist in lon.values.tolist() for item in sublist]).astype(float), 4)\n coords[\"CELL_ID\"] = coords.index.values\n return coords", "def test_lat_lon_to_tile(self):\n\n lat = 48\n lon = 37.7\n z = 10\n\n tile_calculated = geomath.lat_lon_to_tile(lat,lon,z)\n tile_known = (619,355,10)\n\n # make sure the tiles are the same\n self.assertEqual(tile_calculated,tile_known)", "def cartesian2Geo(julian_date, x, y, z):\n\n\n # Calculate LLA\n lat, r_LST, ele = ecef2LatLonAlt(x, y, z)\n\n # Calculate proper longitude from the given JD\n lon, _ = LST2LongitudeEast(julian_date, np.degrees(r_LST))\n\n # Convert longitude to radians\n lon = np.radians(lon)\n\n\n return np.degrees(lat), np.degrees(lon), ele", "def coordinates_to_locations(coordinates):\n np_coords = np.array(coordinates)\n longs, lats = transform(Proj(init=EPSG_IN), Proj(init=EPSG_OUT), np_coords[:, 0], np_coords[:, 1])\n length = len(lats)\n result = []\n for i in range(length):\n loc = Location(lats[i], longs[i])\n raw_location = extract_raw_simple_coordinates(coordinates[i])\n loc.set_raw_coordinates_simplified(raw_location[0],raw_location[1])\n result.append(loc)\n\n return result", "def pixelloc_to_geoloc(row, col, loff, coff):\n row = np.atleast_1d(row)\n col = np.atleast_1d(col)\n\n assert row.shape == col.shape\n\n lat, lon = _nav.pixelloc_to_geoloc(row, col, loff, coff)\n\n if lat.size == lon.size == 1:\n # Return scalars\n return lat[0], lon[0]\n else:\n return lat, lon", "def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height 
above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def coords_trans(map, coords, up_lift=True):\n if len(coords) < 3:\n coords = np.append(coords, [0])\n pass\n\n exact_location = carla.Location(x=coords[0], y=coords[1], z=coords[2])\n waypoint = map.get_waypoint(exact_location) # carla.waypoint\n road_center_location = waypoint.transform.location\n\n if up_lift == True:\n exact_location.z += 1.0\n road_center_location.z += 1.0\n\n return exact_location, waypoint, road_center_location", "def cr2lonlat_for_geotif(path):\n old_cs, new_cs, gta, local_vars = _create_xform(path)\n transform = osr.CoordinateTransformation(old_cs, new_cs)\n\n def composite(c, r):\n \"\"\"xform from (c, r) to (lon, lat)\"\"\"\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat\n \n return composite", "def _extract_location_xyz(self, log):\n\n if \"location\" in log:\n x = log[\"location\"][\"latitude\"]\n y = log[\"location\"][\"longitude\"]\n z = log[\"location\"][\"altitude\"]\n else:\n self.logger.debug(\"NaN case\")\n x = \"NaN\" # matlab Nan?\n y = \"NaN\"\n z = \"NaN\"\n pass\n return str(x) + \",\" + str(y) + \",\" + str(z)", "def 
_get_site_pos(dset):\n # TODO hjegei: Workaround -> better would it be if Position object can handle LLH as input format!!!\n x, y, z = gnss.llh2xyz(np.deg2rad(dset.lat), np.deg2rad(dset.lon), dset.height)\n return np.stack((x, y, z), axis=1)", "def transform_from_latlon(lat, lon):\n from affine import Affine\n lat = np.asarray(lat)\n lon = np.asarray(lon)\n trans = Affine.translation(lon[0], lat[0])\n scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])\n return trans * scale", "def get_lon_lat(df, nombre, ruido=False):\n from pyproj import transform\n\n lat, lon = [], []\n for index, row in tqdm(df.iterrows()):\n lati, loni = [], []\n try:\n for pt in list(row[\"geometry\"].exterior.coords):\n lati.append(pt[1])\n loni.append(pt[0])\n except Exception as e:\n try:\n row.geometry = row.geometry.map(lambda x: x.convex_hull)\n for pt in list(row[\"geometry\"].exterior.coords):\n lati.append(pt[1])\n loni.append(pt[0])\n except Exception as e:\n try:\n lati.append(df.iloc[index].geometry.centroid.y)\n loni.append(df.iloc[index].geometry.centroid.x)\n except Exception as e:\n if not ruido:\n continue\n else:\n print(e)\n print(df.iloc[index].geometry.centroid)\n lat.append(sum(lati) / len(lati))\n lon.append(sum(loni) / len(loni))\n latnew, lonnew = [], []\n for la, lo in zip(lat, lon):\n o, a = transform(inProj, outProj, lo, la)\n if o != float(\"inf\") and a != float(\"inf\"):\n latnew.append(a)\n lonnew.append(o)\n return {nombre: {\"lon\": lonnew, \"lat\": latnew}}" ]
[ "0.6428161", "0.63630664", "0.63455874", "0.6290571", "0.62546146", "0.62182456", "0.6173703", "0.616266", "0.61362135", "0.6058876", "0.60567707", "0.6025662", "0.5944653", "0.5914385", "0.5914179", "0.5903728", "0.5840953", "0.5805078", "0.57799006", "0.5759482", "0.5756983", "0.57522357", "0.5744789", "0.5735501", "0.57275176", "0.5725048", "0.57149935", "0.57080173", "0.5703541", "0.5695174", "0.56647784", "0.5645471", "0.564286", "0.5637938", "0.5632647", "0.5626208", "0.56168437", "0.56044114", "0.5603518", "0.5594503", "0.5583486", "0.55806255", "0.557618", "0.55756736", "0.55671084", "0.5556054", "0.5546488", "0.5537605", "0.55163", "0.5513004", "0.55056596", "0.5501348", "0.5489229", "0.5477959", "0.54767114", "0.5476362", "0.546256", "0.5451617", "0.5450375", "0.54466677", "0.54454416", "0.5443531", "0.5434458", "0.5425091", "0.54225725", "0.5419113", "0.5418988", "0.5407333", "0.5389768", "0.5387131", "0.5383527", "0.53759885", "0.53687567", "0.5368501", "0.53657705", "0.5356349", "0.5349488", "0.5348877", "0.5343891", "0.53345275", "0.53267634", "0.5321849", "0.5319546", "0.53193426", "0.53106993", "0.52949244", "0.5293606", "0.5292076", "0.52917945", "0.5277626", "0.52707195", "0.5269846", "0.5268678", "0.52681744", "0.5267684", "0.5261063", "0.52591974", "0.52544034", "0.52442634", "0.52431256" ]
0.5855182
16
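Several of the negatives in the record above (lnglat_to_meters, project, wgs84_to_mercator) implement the same spherical Web Mercator forward projection. A minimal, self-contained sketch of that shared formula follows; the function name and the sample point are illustrative and are not part of the dataset:

import math

def lnglat_to_web_mercator(lon_deg, lat_deg):
    # Spherical Web Mercator (EPSG:3857) forward projection.
    # origin_shift is half the Earth's equatorial circumference [m],
    # using the 6378137 m radius that lnglat_to_meters above also uses.
    origin_shift = math.pi * 6378137.0
    x = lon_deg * origin_shift / 180.0
    y = math.log(math.tan((90.0 + lat_deg) * math.pi / 360.0)) * origin_shift / math.pi
    return x, y

# e.g. lnglat_to_web_mercator(0.0, 51.4769) -> (0.0, ~6.71e6)  # meters northing

The projection diverges at the poles, which is why the project() snippet above clamps latitude to MAX_LATITUDE (conventionally about 85.05 degrees for Web Mercator) before applying the formula.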
Adds a shortcut between input and residual block and merges them with "sum"
def _shortcut(input, residual):
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[2] / residual_shape[2]))
    stride_height = int(round(input_shape[3] / residual_shape[3]))
    stride_depth = int(round(input_shape[4] / residual_shape[4]))
    equal_channels = input_shape[1] == residual_shape[1]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or stride_depth > 1 or not equal_channels:
        shortcut = Convolution3D(residual_shape[1], 1, 1, 1,
                                 subsample=(stride_width, stride_height, stride_depth),
                                 border_mode="valid", init="he_normal",
                                 W_regularizer=l2(0.0001))(input)

    return shortcut
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residual_block(layer, filters):\n shortcut = layer\n layer = Conv2D(filters=filters, kernel_size=(3, 3),\n strides=(1, 1), padding=\"same\")(layer)\n layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n layer = Conv2D(filters=filters, kernel_size=(3, 3),\n strides=(1, 1), padding=\"same\")(layer)\n\n layer = Add()([layer, shortcut])\n layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n return layer", "def _output_add(block_x, orig_x):\n stride = orig_x.shape[-2] // block_x.shape[-2]\n strides = (stride, stride)\n if block_x.shape[-1] != orig_x.shape[-1]:\n orig_x = nn.avg_pool(orig_x, strides, strides)\n channels_to_add = block_x.shape[-1] - orig_x.shape[-1]\n orig_x = jnp.pad(orig_x, [(0, 0), (0, 0), (0, 0), (0, channels_to_add)])\n return block_x + orig_x", "def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d", "def _shortcut(self, input, residual):\n # Expand channels of shortcut to match residual.\n # Stride appropriately to match residual (width, height)\n # Should be int if network architecture is correctly configured.\n input_shape = K.int_shape(input)\n residual_shape = K.int_shape(residual)\n stride_width = int(round(input_shape[1] / residual_shape[1]))\n stride_height = int(round(input_shape[2] / residual_shape[2]))\n equal_channels = input_shape[3] == residual_shape[3]\n \n shortcut = input\n # 1 X 1 conv if shape is different. Else identity.\n if stride_width > 1 or stride_height > 1 or not equal_channels:\n shortcut = Conv2D(filters=residual_shape[3],\n kernel_size=(1, 1),\n strides=(stride_width, stride_height),\n padding=\"valid\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(0.0001))(input)\n \n return add([shortcut, residual])", "def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d", "def resBlock(_input, _stage, _block):\n nFilters = NBASEFILTERS * (2 ** _stage)\n if _stage > 0 and _block == 0: # downsample\n strides = 2\n else:\n strides = 1\n\n # 2 conv\n output = resLayer(_input, nFilters, _stride=strides)\n output = resLayer(output, nFilters, _acti=False)\n\n # shortcut\n if _stage > 0 and _block == 0: # projection\n # Caution: here the Keras document implementation differs from \n # the one mentioned before (in kernel size), and we use Keras's.\n _input = resLayer(_input, nFilters, _kernelSize=1,\n _stride=strides, _norm=False, _acti=False)\n\n output = keras.layers.add([_input, output])\n output = Activation('relu')(output)\n return output", "def reduce_recipe(self):\n\n self.recipe.reduce(self.crafting, self.crafting_stride)", "def residual_block(net, style_weights, filters, kernel_size, strides,\n depthwise_separable_conv):\n tmp = conv_block(net,\n style_weights,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n tmp = conv_block(tmp,\n style_weights,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n activation=None,\n depthwise_separable_conv=depthwise_separable_conv)\n 
return Add()([net, tmp])", "def _shortcut(input, residual):\r\n # Expand channels of shortcut to match residual.\r\n # Stride appropriately to match residual (width, height)\r\n # Should be int if network architecture is correctly configured.\r\n input_shape = K.int_shape(input)\r\n residual_shape = K.int_shape(residual)\r\n stride_width = int(round(input_shape[1] / residual_shape[1]))\r\n stride_height = int(round(input_shape[2] / residual_shape[2]))\r\n equal_channels = input_shape[3] == residual_shape[3]\r\n\r\n shortcut = input\r\n # 1 X 1 conv if shape is different. Else identity.\r\n if stride_width > 1 or stride_height > 1 or not equal_channels:\r\n shortcut = Conv2D(filters=residual_shape[3],\r\n kernel_size=(1, 1),\r\n strides=(stride_width, stride_height),\r\n padding=\"valid\",\r\n kernel_initializer=\"he_normal\",\r\n kernel_regularizer=l2(0.0001))(input)\r\n\r\n return add([shortcut, residual])", "def _shortcut(input, residual):\n # Expand channels of shortcut to match residual.\n # Stride appropriately to match residual (width, height)\n # Should be int if network architecture is correctly configured.\n input_shape = K.int_shape(input)\n residual_shape = K.int_shape(residual)\n stride_width = int(round(input_shape[1] / residual_shape[1]))\n stride_height = int(round(input_shape[2] / residual_shape[2]))\n equal_channels = input_shape[3] == residual_shape[3]\n\n shortcut = input\n # 1 X 1 conv if shape is different. Else identity.\n if stride_width > 1 or stride_height > 1 or not equal_channels:\n shortcut = Conv2D(filters=residual_shape[3],\n kernel_size=(1, 1),\n strides=(stride_width, stride_height),\n padding=\"valid\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(0.0001))(input)\n\n return add([shortcut, residual])", "def residual_block(self, x, stride, out_filter, activate_before_residual=False, padding_in='SAME'):\n with tf.name_scope(\"res_block\" + str(self._count_conv)):\n in_filter = x.get_shape().as_list()[-1]\n if activate_before_residual:\n with tf.variable_scope('shared_activation'):\n x = self.bn(x)\n x = self.activation(x)\n orig_x = x\n else:\n with tf.variable_scope('residual_only_activation'):\n orig_x = x\n x = self.bn(x)\n x = self.activation(x)\n\n with tf.variable_scope('sub1'):\n x = self.conv(x, 3, stride, out_filter, padding_in=padding_in)\n\n with tf.variable_scope('sub2'):\n x = self.bn(x)\n x = self.activation(x)\n x = self.conv(x, 3, 1, out_filter, padding_in=padding_in)\n\n with tf.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = tf.nn.avg_pool(orig_x, [1, stride, stride, 1], [1, stride, stride, 1], 'VALID')\n\n orig_x = tf.pad(\n orig_x, [[0, 0], [0, 0], [0, 0],\n [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])\n\n x += orig_x\n\n return x", "def residual_block(blockInput, nf, strides=1, batch_activate=False):\n x = BatchActivate(blockInput)\n x = res_conv_block(x, nf)\n x = res_conv_block(x, nf, activation=False)\n x = Add()([x, blockInput])\n if batch_activate:\n x = BatchActivate(x)\n return x", "def residual_block(x, filters: int, a=0.01, dr=0.05, depth=2):\n y = conv_block(x, filters, a, dr)\n for _ in range(depth - 1):\n y = conv_block(y, filters, a, dr)\n y = Add()([x, y])\n\n return y", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def _shortcut(input, residual):\n # Expand channels of shortcut to match residual.\n # Stride appropriately to match residual (width, height)\n # Should be int if network architecture is correctly configured.\n 
input_shape = K.int_shape(input)\n residual_shape = K.int_shape(residual)\n stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))\n stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))\n equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]\n\n shortcut = input\n # 1 X 1 conv if shape is different. Else identity.\n if stride_width > 1 or stride_height > 1 or not equal_channels:\n shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],\n kernel_size=(1, 1),\n strides=(stride_width, stride_height),\n padding=\"valid\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(0.0001))(input)\n\n return add([shortcut, residual])", "def residual_block(x,index,step=\"encoding\"):\r\n if step != \"encoding\":\r\n index = \"decode_{}\".format(index)\r\n \r\n x_new = kl.Conv2D(128,(3,3),padding=\"same\",activation=\"selu\",kernel_initializer=initializer,\r\n name=\"conv1_res_{}\".format(index))(x)\r\n \r\n x_new = kl.Conv2D(128,(3,3),padding=\"same\",kernel_initializer=initializer,\r\n name=\"conv2_res_{}\".format(index))(x_new)\r\n \r\n x_out = kl.Add()([x,x_new])\r\n x_out = kl.Activation(\"relu\")(x_out)\r\n return(x_out)", "def _residual_block(input, id_block, conv_block, mid_f, output_f, repetitions, stage, is_first_layer=False):\n\n for i in range(repetitions):\n if i == 0 and is_first_layer is True:\n input = conv_block(mid_f, output_f, stage, i, input, stride=(1, 1))\n elif i == 0 and is_first_layer is False:\n input = conv_block(mid_f, output_f, stage, i, input)\n else:\n input = id_block(mid_f, output_f, stage, i, input)\n return input", "def darknet53_residualBlock(inputs, filters, training, data_format, strides):\n shortcuts = inputs\n\n inputs = conv2D_fiexed_padding(inputs,\n filters=filters,\n kernel_size=1,\n strides=strides,\n data_format=data_format)\n inputs = batchNorm(inputs, training=training, data_format=data_format)\n inputs = tf.nn.leaky_relu(inputs, alpha=_LEAKY_RELU)\n\n inputs = conv2D_fiexed_padding(inputs,\n filters=2*filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format)\n inputs = batchNorm(inputs, training, data_format)\n inputs = tf.nn.leaky_relu(inputs, alpha=_LEAKY_RELU)\n\n inputs += shortcuts\n\n return inputs\n pass", "def _shortcut(input, residual):\n # Expand channels of shortcut to match residual.\n # Stride appropriately to match residual (width, height)\n # Should be int if network architecture is correctly configured.\n input_shape = K.int_shape(input)\n residual_shape = K.int_shape(residual)\n stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))\n stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))\n stride_depth = int(round(input_shape[DEP_AXIS] / residual_shape[DEP_AXIS]))\n equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]\n\n shortcut = input\n # 1 X 1 conv if shape is different. 
Else identity.\n if stride_width > 1 or stride_height > 1 or stride_depth > 1 or not equal_channels:\n shortcut = Conv3D(filters=residual_shape[CHANNEL_AXIS],\n kernel_size=(1, 1,1),\n strides=(stride_width, stride_height,stride_depth),\n padding=\"valid\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(0.0001))(input)\n\n return add([shortcut, residual])", "def sum_row(input, output, i):\r\n sum_ = input[i, :].sum()\r\n print(\"[Worker %d] Sum for row %d is %f\" % (os.getpid(), i, sum_))\r\n output[i] = sum_", "def convolutional_block(X, f, filters, stage, block, s = 2):\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value\n X_shortcut = X\n\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a',padding = 'valid', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv2D(F2, (f, f), strides = (1,1), name = conv_name_base + '2b', padding = 'same', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(F3, (1, 1), strides = (1,1), name = conv_name_base + '2c', padding = 'valid', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n ##### SHORTCUT PATH ####\n X_shortcut = Conv2D(F3, (1, 1), strides = (s,s), name = conv_name_base + '1', padding = 'valid', kernel_initializer = glorot_uniform(seed=0),kernel_regularizer = regularizers.l1_l2(l1= 0.01,l2 = 0.1))(X_shortcut)\n X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X,X_shortcut])\n X = Activation('relu')(X)\n \n return X", "def _shortcut(input, residual, name):\n # Expand channels of shortcut to match residual.\n # Stride appropriately to match residual (width, height)\n # Should be int if network architecture is correctly configured.\n input_shape = K.int_shape(input)\n residual_shape = K.int_shape(residual)\n stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))\n stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))\n equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]\n\n shortcut = input\n # 1 X 1 conv if shape is different. 
Else identity.\n if stride_width > 1 or stride_height > 1 or not equal_channels:\n shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],\n kernel_size=(1, 1),\n strides=(stride_width, stride_height),\n padding=\"valid\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(0.0001),\n\t\t name = name)(input)\n\n return add([shortcut, residual])", "def rrdb(x):\n h = dense_block(x)\n h = dense_block(h)\n h = dense_block(h)\n h = tf.keras.layers.Lambda(lambda x: x * 0.2)(h)\n out = tf.keras.layers.Add()([h, x])\n return out", "def __radd__(self,that):\n return self.__opExpand2(that,np.add)", "def building_block_v2(self, inputs, block_params, training, projection_shortcut,\n half_layer=None, initial_layer=False, no_prenorm=False):\n filters = block_params['filters']\n kernels = block_params['kernels']\n strides = block_params['strides']\n pad_stride1 = block_params['pad_stride1']\n\n shortcut = inputs\n if (not initial_layer) and (not no_prenorm):\n inputs = self.batch_norm_act(inputs, training)\n if projection_shortcut == 'FirstResUnit':\n # For pointnet, projection shortcut is not needed at the First ResUnit.\n # However, BN and Activation is still required at the First ResUnit for\n # pre-activation.\n shortcut = inputs\n projection_shortcut = None\n if self.IsShowModel: self.log(\n 'shortcut after activation identity for pointnet first res unit')\n if half_layer:\n projection_shortcut = None\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n with tf.variable_scope('c0'):\n inputs = self.conv1d2d3d(inputs, filters, kernels, strides, pad_stride1)\n self.log_tensor_c(inputs, kernels, strides, pad_stride1,\n tf.get_variable_scope().name)\n if half_layer: return inputs\n inputs = self.batch_norm_act(inputs, training)\n\n with tf.variable_scope('c1'):\n inputs = self.conv1d2d3d(inputs, filters, kernels, 1, 's')\n self.log_tensor_c(inputs, kernels, 1, 's',\n tf.get_variable_scope().name)\n\n if self.residual and (not initial_layer):\n assert inputs.shape == shortcut.shape\n if self.IsShowModel: self.log('Add shortcut*%0.1f'%(self.res_scale))\n return inputs * self.res_scale + shortcut\n else:\n return inputs", "def convolutional_block(X, f, filters, stage, block, s = 2):\n \n # Defines name basis.\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieves Filters.\n F1, F2, F3 = filters\n \n # Saves the input value.\n X_shortcut = X\n\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(F1, (1, 1), strides = (s,s), padding = 'valid',name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path.\n X = Conv2D(F2, (f, f), strides = (1,1), padding = 'same',name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path.\n X = Conv2D(F3, (1, 1), strides = (1,1), padding = 'valid',name = conv_name_base + '2c', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n \n ##### SHORTCUT PATH ####\n X_shortcut= Conv2D(F3, (1, 1), strides = (s,s), padding = 'valid',name = conv_name_base + '1', kernel_initializer = 
glorot_uniform())(X_shortcut)\n X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)\n \n # Final step: Adds shortcut value to main path, and pass it through a RELU activation.\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def sum_value(self, lv, rv):", "def convolutional_block(X, filters, stage, block):\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2 = filters\n \n # Save the input value\n X_shortcut = X\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(F1, (3, 3), strides = (2, 2), padding = 'same', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv2D(F2, (3, 3), strides = (1, 1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n\n ##### SHORTCUT PATH ####\n X_shortcut = Conv2D(F2, (3, 3), strides = (2, 2), padding = 'same', name = conv_name_base + '1', kernel_initializer = glorot_uniform())(X_shortcut)\n X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n return X", "def inline_sum(summands, seed):\n for r in summands:\n seed += r\n return seed", "def inline_sum(summands, seed):\n for r in summands:\n seed += r\n return seed", "def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):\n # Expand channels of shortcut to match residual.\n # Stride appropriately to match residual (width, height)\n # Should be int if network architecture is correctly configured.\n input_shape = K.int_shape(input_feature)\n residual_shape = K.int_shape(residual)\n stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))\n stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))\n equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]\n\n shortcut = input_feature\n # 1 X 1 conv if shape is different. 
Else identity.\n if stride_width > 1 or stride_height > 1 or not equal_channels:\n # print('reshaping via a convolution...')\n shortcut = Conv2D(\n filters=residual_shape[CHANNEL_AXIS],\n kernel_size=(1, 1),\n strides=(stride_width, stride_height),\n padding=\"valid\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(0.0001),\n name=conv_name_base,\n )(input_feature)\n shortcut = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name_base)(shortcut)\n\n return add([shortcut, residual])", "def convolutional_block(X, f, filters, stage, block, s=2):\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value.\n X_shortcut = X\n\n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (s,s), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv2D(filters = F2, kernel_size= (f,f),strides= (1,1), padding= 'same' , name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(filters = F3, kernel_size= (1,1),strides= (1,1), padding= 'valid' , name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n #Add shortcut value to main path\n X_shortcut = Conv2D(filters = F3, kernel_size= (1,1),strides= (s,s), padding= 'valid' , name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = layers.add([X,X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def bottleneck_block_v2(self, inputs, block_params, training, projection_shortcut,\n half_layer=None, initial_layer=False, no_prenorm=False):\n filters = block_params['filters']\n kernels = block_params['kernels']\n strides = block_params['strides']\n pad_stride1 = block_params['pad_stride1']\n\n shortcut = inputs\n if (not initial_layer) and (not no_prenorm):\n inputs = self.batch_norm_act(inputs, training)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n with tf.variable_scope('c0'):\n inputs = self.conv1d2d3d(inputs, filters//4, 1, 1, 's')\n self.log_tensor_c(inputs, 1, 1, 's', tf.get_variable_scope().name)\n\n inputs = self.batch_norm_act(inputs, training)\n\n with tf.variable_scope('c1'):\n inputs = self.conv1d2d3d(inputs, filters//4, kernels, strides, pad_stride1)\n self.log_tensor_c(inputs, kernels, strides, pad_stride1,\n tf.get_variable_scope().name)\n\n inputs = self.batch_norm_act(inputs, training)\n\n with tf.variable_scope('c2'):\n inputs = self.conv1d2d3d(inputs, filters, 1, 1, 's')\n self.log_tensor_c(inputs, 1, 1, 's', tf.get_variable_scope().name)\n\n if self.residual:\n if not inputs.shape == shortcut.shape:\n import pdb; pdb.set_trace() # XXX BREAKPOINT\n\n if self.IsShowModel: self.log('Add shortcut*%0.1f'%(self.res_scale))\n return inputs * self.res_scale + shortcut\n else:\n 
return inputs", "def additional_equations(self, k):\n ######################################################################\n # equation for saturated gas at hot side outlet\n o1 = self.outl[0].to_flow()\n self.residual[k] = o1[2] - h_mix_pQ(o1, 1)", "def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):\n if activate_before_residual: \n with tf.variable_scope('shared_activation'):\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n orig_x = x\n else:\n with tf.variable_scope('residual_only_activation'):\n orig_x = x\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self.hps.relu_leakiness)\n\n with tf.variable_scope('sub1'):\n x = self._conv('conv1', x, 3, in_filter, out_filter, stride)\n\n with tf.variable_scope('sub2'):\n x = self._batch_norm('bn2', x)\n x = self._relu(x, self.hps.relu_leakiness)\n x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])\n\n with tf.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')\n orig_x = tf.pad(\n orig_x, [[0, 0], [0, 0], [0, 0],\n [(out_filter-in_filter)//2, (out_filter-in_filter)//2]])\n x += orig_x\n\n tf.logging.debug('image after unit %s', x.get_shape())\n return x", "def final_sum(data: Iterator[str]) -> SnailfishNumber:\n return reduce(add, parse_input(data))", "def _shortcut(inputs, residual):\n inputs_shape = K.int_shape(inputs)\n residual_shape = K.int_shape(residual)\n equal_channels = inputs_shape[2] == residual_shape[2]\n shortcut = inputs\n # 1 x 1 conv if shape is different. Else identity.\n if not equal_channels:\n shortcut = Conv1D(filters=residual_shape[2],\n kernel_size=1, strides=1, padding=\"same\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(1.e-4))(inputs)\n return add([shortcut, residual])", "def weighted_sum(h):\n return h", "def _residual_block(block_function, filters, repetitions):\n def f(inputs):\n for i in range(repetitions):\n inputs = block_function(filters=filters, init_strides=1)(inputs)\n return inputs\n return f", "def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return", "def res_block(input_tensor, kernel_size, filters, stage, \n use_bias=True, train_bn=True):\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + '_branch'\n bn_name_base = 'bn' + str(stage) + '_branch'\n\n with tf.name_scope(conv_name_base) as sc:\n x = KL.Conv2D(nb_filter1, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + '_out')(x)\n return x", "def funky_sum(a, b, mix):\n if mix <= 0:\n return a\n elif mix >= 1:\n return b\n else:\n return (1 - mix) * a + mix * b", "def get_shortcut(\n inputs,\n resnet_unit_label,\n block_id,\n features,\n stride,\n use_bias=False,\n kernel_initializer=\"he_normal\",\n bn_epsilon=1e-5,\n axis=3,\n):\n\n if block_id == 0:\n shortcut = 
Conv3D(\n features,\n (1, 1, 1),\n strides=stride,\n use_bias=use_bias,\n name=f\"resunit{resnet_unit_label}_block{block_id}_shortcut_conv\",\n kernel_initializer=kernel_initializer,\n )(inputs)\n\n shortcut = BatchNormalization(\n axis=axis,\n epsilon=bn_epsilon,\n name=f\"resunit{resnet_unit_label}_block{block_id}_shortcut_bn\",\n )(shortcut)\n return shortcut\n else:\n return inputs", "def block_sum(i, bins, C, n_u):\n s= 0.0\n for j in range(bins[i], bins[i+1]):\n for k in range(bins[i], bins[i+1]):\n s+= C[j][k]*n_u[j]*n_u[k]\n return s", "def residual_block(layer_input, filters=512, down_filter=False, normalization=False):\n\td1 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n\tif normalization:\n\t\t# d = InstanceNormalization()(d)\n\t\td1 = BatchNormalization(momentum=0.8)(d1) # 6/6/2018: use it for CT # 6/5/2018: remove it for MNIST\n\td1 = Activation('relu')(d1)\n\td2 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d1)\n\tif normalization:\n\t\t# d = InstanceNormalization()(d)\n\t\td2 = BatchNormalization(momentum=0.8)(d2) # 6/6/2018: use it for CT # 6/5/2018: remove it for MNIST\n\tif down_filter:\n\t\td = Add()([d1, d2])\n\telse:\n\t\td = Add()([d2, layer_input])\n\treturn d", "def _shortcut(self, x, res):\n # Expand channels of shortcut to match residual's\n # Stride should be int if network architecture is configured correctly\n # To match residual's height and width\n input_shape = x.get_shape().as_list()\n res_shape = res.get_shape().as_list()\n stride_h = int(round(input_shape[1] / res_shape[1]))\n stride_w = int(round(input_shape[2] / res_shape[2]))\n equal_channel = (input_shape[3] == res_shape[3])\n\n shortcut = x\n # # 1 X 1 conv if shape is different, else identity.\n if stride_h > 1 or stride_w > 1 or not equal_channel:\n filter_shape = [1, 1, input_shape[3], res_shape[3]]\n w = self._get_weight_variable(filter_shape)\n shortcut = tf.nn.conv2d(x, w, strides=[1, stride_h, stride_w, 1], padding='VALID')\n\n out = shortcut + res\n\n return out", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def simple_block(input_tensor, filters, prefix, kernel_size = 3, stride = 1,\n regularizer = None, activation = 'relu', conv_shortcut = False, bn = True):\n \n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + prefix\n bn_name_base = 'bn' + prefix\n\n x = Conv2D(filters[1], kernel_size, padding='same', strides=(stride, stride),\n kernel_regularizer = regularizer,\n name=conv_name_base + 'x')(input_tensor)\n if bn:\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + 'x')(x)\n x = Activation(activation)(x)\n\n x = Conv2D(filters[1], kernel_size, padding='same',\n kernel_regularizer = regularizer,\n name=conv_name_base + 'y')(x)\n if bn:\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + 'y')(x)\n\n shortcut = input_tensor\n if (filters[0] != filters[1]) and conv_shortcut:\n shortcut = Conv2D(filters[1], (1, 1), strides=(stride, stride),\n kernel_regularizer = regularizer,\n name=conv_name_base + 'z')(shortcut)\n if bn:\n shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + 'z')(shortcut)\n else:\n if stride > 1:\n shortcut = AveragePooling2D((stride, stride), name='avg'+prefix)(shortcut)\n if filters[0] < filters[1]:\n shortcut = ChannelPadding(((filters[1] - filters[0]) // 2, filters[1] - filters[0] - (filters[1] - filters[0]) // 2),\n name = 'pad'+prefix)(shortcut)\n\n x = layers.add([x, shortcut])\n x = Activation(activation)(x)\n 
return x", "def add_block(self, _input, growth_rate, layers_per_block):\n output = _input\n for layer in range(layers_per_block):\n with tf.variable_scope(\"layer_%d\" % layer):\n output = self.add_internal_layer(output, growth_rate)\n return output", "def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(1, 1)):\n # retriving filters\n F1, F2, F3 = filters\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x_shortcut = input_tensor\n\n # First component of main path\n x = Conv2D(filters=F1, kernel_size=(1, 1), strides=strides, name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n # Second component of main path\n x = Conv2D(filters=F2, kernel_size=(kernel_size, kernel_size), strides=(1, 1), padding='same',\n name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n # Third component of main path\n x = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)\n\n # shortcut convolution layer\n x_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=strides, padding='valid', name=conv_name_base + '1')(\n x_shortcut)\n x_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(x_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n x = add([x, x_shortcut])\n x = Activation('relu')(x)\n\n return x", "def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,\n strides, data_format):\n shortcut = inputs\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n shortcut = batch_norm(inputs=shortcut, training=training,\n data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=1, strides=1,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs += shortcut\n inputs = tf.nn.relu(inputs)\n\n return inputs", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def test_local_sum_sum_int8(self):\r\n x = tensor.tensor3(dtype='int8')\r\n y = x.sum(axis=0).sum(axis=1)\r\n backup = config.on_opt_error\r\n config.on_opt_error = 'raise'\r\n try:\r\n # This compilation would fail prior to fix.\r\n f = theano.function([x], y)\r\n finally:\r\n config.on_opt_error = backup", "def build_resnet_block(inputres, dim, name=\"resnet\", padding=\"REFLECT\"):\n with tf.variable_scope(name):\n out_res = tf.pad(inputres, [[0, 0], [1, 1], [\n 1, 1], [0, 0]], 
padding)\n out_res = general_conv2d(\n out_res, dim, 3, 3, 1, 1, 0.02, \"VALID\", \"c1\")\n out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)\n out_res = general_conv2d(\n out_res, dim, 3, 3, 1, 1, 0.02, \"VALID\", \"c2\", do_relu=False)\n\n return tf.nn.relu(out_res + inputres)", "def residual_block_(inputs,\n filters,\n is_training,\n strides,\n use_projection=False,\n pruning_method='baseline',\n init_method='baseline',\n data_format='channels_first',\n end_sparsity=0.,\n weight_decay=0.,\n name=''):\n shortcut = inputs\n if use_projection:\n # Projection shortcut in first layer to match filters and strides\n end_point = 'residual_projection_%s' % name\n shortcut = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=1,\n strides=strides,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n shortcut = batch_norm_relu(\n shortcut, is_training, relu=False, data_format=data_format)\n\n end_point = 'residual_1_%s' % name\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n inputs = batch_norm_relu(\n inputs, is_training, data_format=data_format)\n\n end_point = 'residual_2_%s' % name\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=1,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n inputs = batch_norm_relu(\n inputs, is_training, relu=False, init_zero=True, data_format=data_format)\n\n return tf.nn.relu(inputs + shortcut)", "def ResBlock(input_tensor, filters):\n \n conv_1 = Conv2D(filters = filters, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal') \n conv_1a = conv_1(input_tensor) # Shared weights conv layer\n batch_1 = BatchNormalization()(conv_1a)\n relu_1 = Activation(\"relu\")(batch_1)\n drop_1 = Dropout(drop)(relu_1)\n conv_1b = conv_1(drop_1) # Shared weights conv layer\n batch_1 = BatchNormalization()(conv_1b)\n return batch_1", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def reduce_sum_encoder(inputs):\n return tf.reduce_sum(inputs, axis=1)", "def _residual_block(block_function, filters, repetitions, is_first_layer=False):\n def f(input):\n for i in range(repetitions):\n init_strides = (1, 1)\n if i == 0 and not is_first_layer:\n init_strides = (2, 2)\n input = block_function(filters=filters, init_strides=init_strides,\n is_first_block_of_first_layer=(is_first_layer and i == 0))(input)\n return input\n\n return f", "def _combine_task_specific_block(\n feature,\n weights,\n biases,\n model_spec,\n scope=\"combine_task_specific_block\"\n ):\n with tf.variable_scope(scope):\n task_dim = weights[0][0].shape[0].value\n task_ones = tf.ones(shape=[task_dim])\n feature_task_expanded = tf.einsum(\n \"ik,j->ijk\",\n feature,\n task_ones\n )\n weights_combined = []\n biases_combined = []\n for i in range(len(weights[0])):\n weights_combined.append(\n weights[0][i] + weights[1][i]\n )\n biases_combined.append(\n biases[0][i] + biases[1][i]\n )\n # in the special case only two layers: len(weights_combined) == 2 #\n hidden_a = tf.einsum(\n 
\"ijk,jlk->ijl\",\n feature_task_expanded,\n weights_combined[0]\n )\n hidden_a = hidden_a + biases_combined[0]\n if model_spec[\"activation\"] is not None:\n hidden_a = model_spec[\"activation\"](hidden_a)\n hidden_a_dropout = tf.layers.dropout(\n hidden_a,\n rate=model_spec[\"dropout_task_hidden_a\"],\n name=\"task_hidden_a_dropout\"\n )\n logits = tf.einsum(\n \"ijk,jlk->ijl\",\n hidden_a_dropout,\n weights_combined[1]\n )\n logits = logits + biases_combined[1]\n return logits", "def sum_plus(t, init):\n total = init\n for x in t:\n total += x\n return total", "def add_block2(_input, growth_rate, layers_per_block, bc_mode, is_training):\n output = _input\n for layer in range(layers_per_block):\n with tf.variable_scope(\"layer_%d\" % layer):\n output = add_internal_layer(output, growth_rate, bc_mode=bc_mode, is_training=is_training)\n return output", "def plus(self, layer):\n\n input1 = self.node(layer)\n if not input1:\n return\n LOGGER.debug('Plus layer to last:%s', layer)\n if not self.last_node:\n self.last_node = nuke.nodes.Constant()\n\n if layer not in self.layers():\n input1 = nuke.nodes.Shuffle(inputs=[input1], out=layer)\n self.last_node = nuke.nodes.Merge2(\n inputs=[self.last_node, input1], operation='plus',\n also_merge=layer if layer not in self.layers() else 'none',\n label=utf8(self.l10n(layer)),\n output='rgb')", "def _residual(self,\n x,\n in_filter,\n out_filter,\n stride,\n activate_before_residual=False):\n\n if activate_before_residual:\n with tf.variable_scope('shared_activation'):\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self._params.relu_leakiness)\n orig_x = x\n else:\n with tf.variable_scope('residual_only_activation'):\n orig_x = x\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self._params.relu_leakiness)\n\n with tf.variable_scope('sub1'):\n x = self._conv('conv1', x, 3, in_filter, out_filter, stride)\n\n with tf.variable_scope('sub2'):\n x = self._batch_norm('bn2', x)\n x = self._relu(x, self._params.relu_leakiness)\n x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])\n\n with tf.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')\n orig_x = tf.pad(\n orig_x, [[0, 0], [0, 0], [0, 0], [(out_filter - in_filter) // 2,\n (out_filter - in_filter) // 2]])\n x += orig_x\n\n return x", "def residual(n_filters, input):\n shape = input.shape\n _, h, w, d = shape\n l1 = Conv2D(n_filters, (5, 5), padding='valid', activation='elu')(input)\n l2 = Conv2D(n_filters, (1, 1), padding='valid', activation='linear')(l1)\n l3 = Cropping2D(cropping=2)(input)\n added = Add()([l2, l3])\n return added", "def _res_block(self, X, name, out_channels):\n \n X_shortcut = X\n X = self._mfm(X, name = name + '_mfm1', out_channels=out_channels, kernel_size=3, strides=1)\n X = self._mfm(X, name = name + '_mfm2', out_channels=out_channels, kernel_size=3, strides=1)\n X = Add()([X, X_shortcut])\n return X", "def sum_():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n try:\n s = (yield)\n while True:\n s += (yield)\n except GeneratorExit:\n target.send(s)\n target.close()\n\n return _dagpype_internal_fn_act", "def make_accumulator():\n pass # replace with your solution", "def sum_sum(t, init):\n return sum(t, init)", "def additional_equations(self, k):\n ######################################################################\n # equation for saturated liquid at hot side outlet\n if self.subcooling.val is False:\n o1 = self.outl[0].to_flow()\n self.residual[k] = o1[2] - h_mix_pQ(o1, 0)\n 
k += 1", "def identity_block(input_tensor, kernel_size, filters, stage, block):\n filters0, filters1, filters2 = filters\n conv_name_base = 'res' + str(stage) + block\n bn_name_base = 'bn' + str(stage) + block\n add_name = 'add' + str(stage) + \"_\" + block\n relu_name = 'relu' + str(stage) + \"_\" + block\n\n # Tensors\n input_tensor_chans = input_tensor.dims(\n 3) if input_tensor.shape.layout == sg.NHWC else input_tensor.dims(1)\n conv0_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters0, 1, 1, input_tensor_chans)))\n bn0_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n conv1_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters1, kernel_size, kernel_size, filters0)))\n bn1_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n conv2_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters2, 1, 1, filters1)))\n bn2_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n\n x = sg.nn.convolution(\n input_tensor, conv0_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2a')\n x = sg.nn.batch_norm(\n x, bn0_mean_tensor, bn0_var_tensor, bn0_gamma_tensor, bn0_beta_tensor,\n activation=\"relu\", name=bn_name_base + '_2a')\n x = sg.nn.convolution(\n x, conv1_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2b')\n x = sg.nn.batch_norm(\n x, bn1_mean_tensor, bn1_var_tensor, bn1_gamma_tensor, bn1_beta_tensor,\n activation=\"relu\", name=bn_name_base + '_2b')\n x = sg.nn.convolution(\n x, conv2_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2c')\n x = sg.nn.batch_norm(\n x, bn2_mean_tensor, bn2_var_tensor, bn2_gamma_tensor, bn2_beta_tensor,\n name=bn_name_base + '_2c')\n x = sg.math.add(x, input_tensor, name=add_name)\n x = sg.nn.relu(x, name=relu_name)\n return x", "def get_sum(a,b):\n return", "def __iadd__(self, term):\n self.add(term)\n return self", "def add(self, term):\n self._value = self.accum_param.addInPlace(self._value, term)", "def _building_block_v1(inputs, filters, training, projection_shortcut, strides,\n data_format):\n shortcut = inputs\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n shortcut = batch_norm(inputs=shortcut, training=training,\n data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs = 
tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs += shortcut\n inputs = tf.nn.relu(inputs)\n\n return inputs", "def _building_block_v2(inputs, filters, training, \n projection_shortcut, strides,\n data_format):\n shortcut = inputs\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n ENDING_POINTS.append(inputs)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=strides,\n data_format=data_format, time_stride=strides)\n\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut", "def identity_block(X, f, filters, stage, block):\n \n # Defines name basis.\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieves Filters.\n F1, F2, F3 = filters\n \n # Saves the input value. This is needed later to add back to the main path. \n X_shortcut = X\n \n ##### MAIN PATH #####\n # First component of main path.\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n \n # Second component of main path.\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n \n # Third component of main path.\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n\n # Final step: Adds shortcut value to main path, and pass it through a RELU activation.\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def residual_module(in_block, K, stride, chanDim, red=False,\n reg= 1e-4, bnEps=2e-5, bnMom=.9):\n\n x = in_block\n chan_in = in_block.shape.as_list()[-1]\n bn1 = BatchNormalization(axis=chanDim, epsilon=bnEps, momentum=bnMom)(x)\n relu1 = Activation(\"relu\")(bn1)\n conv1 = Conv2D(filters=int(K * .25), kernel_size=(1, 1),\n use_bias=False,\n kernel_regularizer=l2(reg))(relu1) #Conv2D learns 1/4(0.25) of the last conv filter\n\n bn2 = BatchNormalization(axis=chanDim, epsilon=bnEps, momentum=bnMom)(conv1)\n relu2 = Activation(\"relu\")(bn2)\n conv2 = Conv2D(filters=int(K * .25), kernel_size=(3, 3), strides=stride,\n padding=\"same\",\n use_bias=False,\n kernel_regularizer=l2(reg))(relu2) #Conv2D learns 1/4(0.25) of the last conv filter\n\n bn3 = BatchNormalization(axis=chanDim, epsilon=bnEps, momentum=bnMom)(conv2)\n relu3 = Activation(\"relu\")(bn3)\n conv3 = Conv2D(filters=K, kernel_size=(1, 1),\n kernel_regularizer=l2(reg))(relu3)\n\n if red:\n x = Conv2D(filters=K, kernel_size=(1, 1), strides=stride,\n kernel_regularizer=l2(reg))(relu1)\n x 
= se_block(x)\n return add([x, conv3])", "def __add__(self,that):\n return self.__opExpand2(that,np.add)", "def add_block(_input, growth_rate, layers_per_block, bc_mode, name, is_training):\n output = _input\n for layer in range(layers_per_block):\n with tf.variable_scope(\"%s_layer_%d\" % (name, layer)):\n output = add_internal_layer(output, growth_rate, bc_mode=bc_mode, is_training=is_training)\n return output", "def __add__(self, region):\n return Sequence(\n self.weight + region.weight,\n region.finish,\n (*self.regs, region)\n )", "def __iadd__(self,that):\n #return self.__opExpand1(that,np.add, out=self)\n return self.__opExpand2(that,np.add, out=self)", "def add(self, params):\n if len(params) < 2:\n return\n x = self.reg_dct[params[0]]\n y = self.reg_dct[params[1]]\n self.reg_dct[params[0]] = (x + y) % (2** 32)", "def test_tensor_can_be_added_summation(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n x = IndexedBase('x')\n y = IndexedBase('y')\n\n tensor = dr.sum((i, p.R), x[i, j] * y[j, i])\n\n for res in [\n dr.einst(tensor),\n dr.sum((j, p.R), tensor)\n ]:\n assert res == dr.einst(x[i, j] * y[j, i])", "def sum(self, start=0, end=None):\n return super().reduce(start, end)", "def attn_sum_bahdanau(v_attn, keys, query):\n return tf.reduce_sum(v_attn * tf.tanh(keys + tf.expand_dims(query, 1)), [2])", "def add_input(self, accumulator, batch: input_batch.InputBatch) -> int:\n null_mask = batch.null_mask(self._path)\n if self._required_paths:\n required_null_mask = batch.all_null_mask(*self._required_paths)\n null_mask = null_mask & ~required_null_mask\n return accumulator + np.sum(null_mask)", "def chamfer_sum(a,b):\n #tf.print(a)\n #tf.print(a[0,0,:])\n #tf.print(b[0,0,:])\n M = pairwise_distances(a, b)\n if len(M.shape) == 2:\n M = tf.expand_dims(M,0) #[np.newaxis, :, :]\n #return tf.keras.backend.sum(tf.reduce_sum(tf.reduce_min(M, 1), 1) + tf.reduce_sum(tf.reduce_min(M, 2), 1))\n c=tf.reduce_sum(tf.reduce_min(M, 1), 1) + tf.reduce_sum(tf.reduce_min(M, 2), 1)\n #tf.print(tf.reduce_sum(c))\n return c", "def block_reduction_a(self, inputs, scope=None, reuse=None):\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')\n branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])", "def addition_homework(data: Iterator[str]) -> int:\n n = final_sum(data)\n return n.magnitude", "def conv_block(\n input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n filters0, filters1, filters2 = filters\n conv_name_base = 'res' + str(stage) + block\n bn_name_base = 'bn' + str(stage) + block\n add_name = 'add' + str(stage) + \"_\" + block\n relu_name = 'relu' + str(stage) + \"_\" + block\n\n # sg.Tensors\n input_tensor_chans = input_tensor.dims(\n 3) if input_tensor.shape.layout == sg.NHWC else input_tensor.dims(1)\n conv0_tensor = sg.Tensor(\n 
data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters0, 1, 1, input_tensor_chans)))\n bn0_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n conv1_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters1, kernel_size, kernel_size, filters0)))\n bn1_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n conv2_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters2, 1, 1, filters1)))\n bn2_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n conv3_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters2, 1, 1, input_tensor_chans)))\n bn3_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn3_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn3_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn3_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n\n x = sg.nn.convolution(\n input_tensor, conv0_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2a')\n x = sg.nn.batch_norm(\n x, bn0_mean_tensor, bn0_var_tensor, bn0_gamma_tensor, bn0_beta_tensor,\n activation=\"relu\")\n x = sg.nn.convolution(\n x, conv1_tensor, stride=strides, padding=\"same\",\n name=conv_name_base + '_2b')\n x = sg.nn.batch_norm(\n x, bn1_mean_tensor, bn1_var_tensor, bn1_gamma_tensor, bn1_beta_tensor,\n activation=\"relu\", name=bn_name_base + '_2b')\n x = sg.nn.convolution(\n x, conv2_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2c')\n x = sg.nn.batch_norm(\n x, bn2_mean_tensor, bn2_var_tensor, bn2_gamma_tensor, bn2_beta_tensor,\n name=bn_name_base + '_2c')\n shortcut = sg.nn.convolution(\n input_tensor, conv3_tensor, stride=strides, padding=\"same\",\n name=conv_name_base + '_1')\n shortcut = sg.nn.batch_norm(\n shortcut, bn3_mean_tensor, bn3_var_tensor, bn3_gamma_tensor,\n bn3_beta_tensor, name=bn_name_base + '_1')\n x = sg.math.add(x, shortcut, name=add_name)\n x = sg.nn.relu(x, name=relu_name)\n return x", "def addRes(res1, res2):\n return [(x + y) for x, y in zip(res1, res2)]", "def combine_and_select_block(self, first):\n block = self.combine_block(first)\n self.combined.append(block)", "def compute_partials(self, inputs, partials):\n partials['y', 'x'] = 2.0" ]
[ "0.6083529", "0.5894693", "0.5735994", "0.56371546", "0.5619291", "0.5514519", "0.5502387", "0.5502087", "0.54550755", "0.54191214", "0.54108816", "0.5387289", "0.53559756", "0.5333165", "0.53286403", "0.5305475", "0.5285419", "0.5280931", "0.5280322", "0.52757835", "0.52651584", "0.5226238", "0.51707584", "0.5166492", "0.5157738", "0.5149284", "0.5146962", "0.5143549", "0.5135152", "0.5133739", "0.5133739", "0.51181805", "0.5112227", "0.50783056", "0.50556105", "0.5049382", "0.50405335", "0.5034451", "0.501865", "0.5014752", "0.501416", "0.5004759", "0.50024974", "0.50016445", "0.49969983", "0.49855515", "0.49635902", "0.4958141", "0.49513847", "0.49402955", "0.4933999", "0.493056", "0.49005392", "0.49005392", "0.49005392", "0.49005392", "0.49005392", "0.48994178", "0.48689172", "0.48619723", "0.48616308", "0.486131", "0.486131", "0.48606828", "0.48468104", "0.4839924", "0.48362008", "0.48349166", "0.4833033", "0.48290336", "0.482136", "0.48125598", "0.48067382", "0.48051438", "0.47974223", "0.4793421", "0.47857055", "0.47842354", "0.4776741", "0.47717035", "0.47597125", "0.47560155", "0.47559467", "0.47557437", "0.47436547", "0.47431573", "0.4741686", "0.47385788", "0.47341672", "0.47335073", "0.47303116", "0.47294116", "0.47254378", "0.47253445", "0.47228637", "0.47226715", "0.4718978", "0.4718711", "0.47092474", "0.47049385" ]
0.515336
25
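A plausible reading of the three trailing fields in each row (negative_scores, document_score, document_rank) — an inference from the numbers, not something documented in the dump itself: the rank appears to count how many negatives outscore the positive document. In the row above, exactly 25 of the listed scores exceed the document_score of 0.515336, matching the rank of 25, and the -1 in the next row would then mark a document that was never scored. A minimal sketch under that assumption:

def document_rank(document_score, negative_scores):
    # Assumed semantics: rank = number of negatives whose score exceeds
    # the positive document's score; -1 would mean "not scored at all".
    return sum(1 for s in negative_scores if s > document_score)

# Applied to the row above this yields 25.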
Patching out the functions in CrimeIncidentsIntent that use requests.get
def setUp(self): super().setUp() self.get_crime_incident_response = \ mock.patch( ('mycity.intents.crime_activity_intent.' 'get_crime_incident_response'), return_value=test_constants.GET_CRIME_INCIDENTS_API_MOCK) self.get_crime_incident_response.start() response = self.controller.on_intent(self.request) for record in MOCK_RESPONSE[RESULT][RECORDS]: self.assertIn(record[STREET], response.output_speech)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _external_request(self, method, url, *args, **kwargs):\n self.last_url = url\n if url in self.responses.keys() and method == 'get':\n return self.responses[url] # return from cache if its there\n\n headers = kwargs.pop('headers', None)\n custom = {'User-Agent': useragent}\n if headers:\n headers.update(custom)\n kwargs['headers'] = headers\n else:\n kwargs['headers'] = custom\n\n response = getattr(requests, method)(url, *args, **kwargs)\n\n if self.verbose:\n print(\"Got Response: %s\" % url)\n\n if response.status_code == 503:\n raise SkipThisService(\"Service returned 503 - Temporarily out of service.\")\n\n if method == 'get':\n self.responses[url] = response # cache for later\n\n self.last_raw_response = response\n return response", "def test_request_should_not_include_token(self):\n client = Client()\n\n with patch(\"requests.request\") as request:\n request.return_value.json.return_value = {}\n\n client.request(\"GET\", \"http://www.google.com/\")\n\n request.assert_called_once_with(\n \"GET\",\n \"http://www.google.com/\",\n headers=None,\n json=None,\n params=b\"per_page=100\",\n )", "def mocked_requests_scrapping_get(*args, **kwargs):\n class MockResponse:\n def __init__(self, json_data, status_code, url):\n self.content = json_data\n self.status_code = status_code\n self.url = url\n self.cookies = {\"JSESSIONID\": \"jkghhjgjhgfjgfgjg\"}\n self.encoding = \"utf-8\"\n\n def json(self):\n return self.json_data\n\n dn = os.path.dirname(os.path.realpath(__file__))\n for url, provider in {f\"{settings.BASE_URL}/eAnnuaire/formulaire?appelRetour=true\": \"form\",\n f\"{settings.BASE_URL}/eAnnuaire/resultat\": \"suivant\",\n f\"{settings.BASE_URL}/eAnnuaire/fiche\": \"detail\"}.items():\n if args[0].startswith(url):\n with open(os.path.join(dn, \"fixtures\", f\"{provider}.html\"), \"rb\") as fp:\n return MockResponse(fp.read(), 200, args[0])", "def get(self, request):\n pass", "def test_client_can_load_client_page_requests_directly(self):\n\n req = self.httpbin_3.get_request_data('get_my_ip')\n\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['get_my_ip'])\n req = self.httpbin_3.get_request_data('test_requests_patch_method')\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['test_requests_patch_method'])\n req = self.httpbin_3.get_request_data('test_requests_delete_method')\n self.assertEqual(req, self.httpbin_3.client[\"second_page\"]['test_requests_delete_method'])\n\n req = self.httpbin_4.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_4.client['get_my_ip'])\n req = self.httpbin_4.get_request_data('get_user_my_agent')\n self.assertEqual(req, self.httpbin_4.client['get_user_my_agent'])\n req = self.httpbin_4.get_request_data('test_requests_put_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_put_method'])\n req = self.httpbin_4.get_request_data('test_requests_post_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_post_method'])", "def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)", "def request(self, url, *args, **kwargs):\n raise NotImplementedError", "def aget(url, **kwargs):\n return requests.get(url, **kwargs)", "def rhsa(monkeypatch, mock_get):\n monkeypatch.delattr('requests.sessions.Session.request')\n monkeypatch.setattr(ErrataConnector, '_auth', None)\n monkeypatch.setattr(requests, 'get', mock_get)\n return Erratum(errata_id=36762)", "def requester(get_args: dict) -> dict:\n get_args.update(dict(apikey = apikey))\n response = requests.get(URL, params = 
get_args)\n return response.json()", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def get_request(req_context, uri):\n headers = { 'Accept': \"application/json\", \n 'User-Agent': \"testApp\"\n }\n if config.ENVIRONMENT == \"Sandbox\":\n base_url = \"https://sandbox-quickbooks.api.intuit.com/v3/company/\"\n else:\n base_url = \"https://quickbooks.api.intuit.com/v3/company/\"\n url = base_url + req_context.realm_id + uri\n print(url)\n if config.AUTH_TYPE == \"OAuth2\":\n headers['Authorization'] = \"Bearer \" + req_context.access_token\n req = requests.get(url, headers=headers)\n else:\n auth = OAuth1(req_context.consumer_key, req_context.consumer_secret, req_context.access_key, req_context.access_secret)\n req = requests.get(url, auth=auth, headers=headers)\n return req", "def crime_data_fbi(request):\n http = urllib3.PoolManager()\n\n # Getting the request arguments for city, distance , start date and end date \n\n #request_args = request.args\n #print('request_args')\n #ori=request_args['ori']\n #start_date=request_args['start_date']\n #end_date=request_args['end_date']\n #distance=request_args['distance']\n base_url=fbi_url(request)\n print(base_url)\n \n # New request url \n request_url = base_url\n print(request_url)\n \n print(\"i am inside the functino\")\n payload = http.request('GET',\n request_url,\n headers={\n 'Content-Type': 'application/json',\n 'x-api-key': creds\n },\n fields={\n 'API_KEY':creds\n }\n )\n\n #*** only changing it for testing ***\n #return request_url\n print(payload)\n return payload", "def get_request(url):\n\tr = requests.get(url)\n\treturn(r)", "def mock_requests_get(mocker, mocked_requests_get):\n\n def _requests_get(module, content, status):\n mock_func = mocker.patch(f'{module}.requests.get')\n mock_func.return_value = mocked_requests_get(content, status)\n\n return _requests_get", "def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')", "def request( # pylint: disable=arguments-differ\n self, method: str, url: str, **kwargs\n ) -> object:\n if self.base_url is not None and not url.startswith('https'):\n url = f'{self.base_url}{url}'\n\n # this kwargs value is used to signal 429 handling that this is a retry, but the super\n # method doesn't expect it so it needs to be removed.\n tc_is_retry = kwargs.pop('tc_is_retry', False)\n\n response: Response = super().request(method, url, **kwargs)\n\n if response.status_code == 429 and not tc_is_retry:\n too_many_requests_handler = self.too_many_requests_handler\n time.sleep(too_many_requests_handler(response))\n kwargs['tc_is_retry'] = True\n return self.request(method, url, **kwargs)\n\n # APP-79 - adding logging of request as curl commands\n if not response.ok or self.log_curl:\n try:\n self.log.debug(\n self.requests_to_curl.convert(\n response.request,\n mask_body=self.mask_body,\n mask_headers=self.mask_headers,\n mask_patterns=self.mask_patterns,\n proxies=self.proxies,\n verify=self.verify,\n )\n )\n except Exception: # nosec\n pass # logging curl command is best effort\n\n self.log.debug(\n f'feature=external-session, request-url={response.request.url}, '\n f'status_code={response.status_code}, elapsed={response.elapsed}'\n )\n\n return response", "def do_request(method, url, data=None, 
headers=None):\n try:\n if method == 'GET':\n resp = requests.get(url, headers=headers)\n return resp\n elif method == 'POST':\n resp = requests.post(url, json=data, headers=headers)\n return resp\n elif method == 'PATCH':\n resp = requests.patch(url, json=data, headers=headers)\n return resp\n except Exception, e:\n print \"Retry {} with {}, {}\".format(str(e), url, data)\n raise e", "def cached():\n ##from pprint import pprint\n # let's restrict this to the api server, to avoid shenanigans\n root_relative_url = request.env.request_uri.split('/cached/')[-1]\n ##pprint('ROOT-RELATIVE URL: ')\n ##pprint(root_relative_url)\n fetch_url = '%s://%s/%s' % (request.env.wsgi_url_scheme, request.env.http_host, root_relative_url)\n ##pprint('PROXYING TO SIMPLE URL: ')\n ##pprint(fetch_url)\n\n # permissive CORS handling of requests from another domain (e.g. tree.opentreeoflife.org)\n if request.env.request_method == 'OPTIONS':\n if request.env.http_access_control_request_method:\n response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method\n if request.env.http_access_control_request_headers:\n response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers\n ##pprint('RESPONDING TO OPTIONS')\n raise HTTP(200, **(response.headers))\n\n # N.B. This try/except block means we'll cache errors. For now, the fix is to clear the entire cache.\n try:\n # fetch the latest IDs as JSON from remote site\n import simplejson\n\n if fetch_url.startswith('//'):\n # Prepend scheme to a scheme-relative URL\n fetch_url = \"http:%s\" % fetch_url\n\n fetch_args = request.vars # {'startingTaxonOTTId': \"\"}\n\n # TODO: For more flexibility, we should examine and mimic the original request (HTTP verb, headers, etc)\n\n # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API\n # N.B. 
that gluon.tools.fetch() can't be used here, since it won't send \"raw\" JSON data as treemachine expects\n req = urllib2.Request(url=fetch_url, data=simplejson.dumps(fetch_args), headers={\"Content-Type\": \"application/json\"}) \n the_response = urllib2.urlopen(req).read()\n ##pprint('RESPONSE:')\n ##pprint(the_response)\n return the_response\n\n except Exception, e:\n # throw 403 or 500 or just leave it\n return ('ERROR', e.message)", "def live_url_request(url, arg):\n if arg == \"-m\":\n json_response = requests.get(url)\n\n elif arg == \"-l\":\n live_token= get_live_token()\n json_response = requests.get(url, headers={\"PRIVATE-TOKEN\": live_token})\n return json_response", "def _request(self, *args):\n raise NotImplementedError", "def getFromCache(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _get(self, url):\n return self._request(url)", "def mock_requests_get(request, monkeypatch, mock_get_args):\n\n def mock_get(url, *args, **kwargs):\n mock_get_args(url, *args, **kwargs)\n\n mock_resp = MagicMock()\n if url == qml.data.data_manager.FOLDERMAP_URL:\n json_data = _folder_map\n elif url == qml.data.data_manager.DATA_STRUCT_URL:\n json_data = _data_struct\n else:\n json_data = None\n\n mock_resp.json.return_value = json_data\n if hasattr(request, \"param\"):\n mock_resp.content = request.param\n\n return mock_resp\n\n monkeypatch.setattr(qml.data.data_manager, \"get\", mock_get)\n\n return mock_get", "def _call_api(self, verb, url, **request_kwargs):\n api = 'https://api.github.com{}'.format(url)\n auth_headers = {'Authorization': 'token {}'.format(self.api_token)}\n headers = {**auth_headers, **request_kwargs.pop('headers', {})}\n return getattr(requests, verb)(api, headers=headers, **request_kwargs)", "def patch_get(request):\n try:\n mp = request.getfixturevalue(\"monkeypatch\")\n except AttributeError: # pytest < 3\n mp = request.getfuncargvalue(\"monkeypatch\")\n mp.setattr(requests.Session, 'request', get_mockreturn)\n return mp", "def step_impl(context, query):\n url = context.base_url+query\n print('url:',url,'\\n')\n with closing(requests.get(url)) as response:\n context.response = response\n context.response_json = response.json()", "def url_for_request(self, method, extras):\n raise NotImplementedError(\"Should be overriden by subclass\")", "def test_request_extra_allowed_codes():\n mocked_codes = {\"codes\": [HTTPCreated.code, HTTPOk.code, HTTPCreated.code]} # note: used in reverse order\n\n def mocked_request(*_, **__):\n mocked_resp = Response()\n mocked_resp.status_code = mocked_codes[\"codes\"].pop()\n return mocked_resp\n\n with mock.patch(\"requests.Session.request\", side_effect=mocked_request) as mocked:\n resp = request_extra(\"get\", \"http://whatever\", retries=3, allowed_codes=[HTTPOk.code])\n assert resp.status_code == HTTPOk.code\n assert mocked.call_count == 2", "def GetCustomInterest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _make_request_with_auth_fallback(self, url, headers=None, params=None):\n self.log.debug(\"Request URL and Params: %s, %s\", url, params)\n try:\n resp = requests.get(\n url,\n headers=headers,\n verify=self._ssl_verify,\n params=params,\n timeout=DEFAULT_API_REQUEST_TIMEOUT,\n proxies=self.proxy_config,\n )\n resp.raise_for_status()\n except 
requests.exceptions.HTTPError as e:\n self.log.debug(\"Error contacting openstack endpoint: %s\", e)\n if resp.status_code == 401:\n self.log.info('Need to reauthenticate before next check')\n\n # Delete the scope, we'll populate a new one on the next run for this instance\n self.delete_current_scope()\n elif resp.status_code == 409:\n raise InstancePowerOffFailure()\n elif resp.status_code == 404:\n raise e\n else:\n raise\n\n return resp.json()", "def __call__(self, request):\n response = self.get_request(request)\n return response", "def test_request_fetch(response, mocker):\n mocker.patch(\"requests.get\", autospec=True)\n requests.get.return_value = response\n request.fetch(\"http://localhost\")\n requests.get.assert_called_with(\"http://localhost\", request.headers)", "def requestURL(userID): #@NoSelf", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def _Dynamic_Fetch(self, request, response):\n print \"Request:\"\n print (\"Request: {}\").format(request)\n response.set_content(self.mock_response_issue)\n response.set_statuscode(200)\n new_header = response.add_header()\n new_header.set_key('Content-type')\n new_header.set_value('application/json')\n\n response.set_finalurl(request.url)\n response.set_contentwastruncated(False)\n\n # allow to query the object after it is used\n # pylint: disable=attribute-defined-outside-init\n self.request = request\n self.response = response", "def _make_get_request(self,url,object_fh,params=None,return_type=None,extras=None):\n \n if params is None:\n params = {}\n \n if extras is None:\n extras = {}\n \n #Polite Pool Work\n #---------------------------------------\n #Example \n #GroovyBib/1.1 (https://example.org/GroovyBib/; mailto:GroovyBib@example.org) BasedOnFunkyLib/1.4.\n\n #It is unclear if we need to match this format\n #This is good enough for now\n #Eventually we might allow a user to describe their application\n #version, and url\n ua_str = 'st_crossref/%s (https://github.com/ScholarTools/crossref_api_python; mailto:%s)' % (VERSION,user_config.email)\n \n headers = {'user-agent': ua_str}\n \n \n #TODO Check params and # of results ...\n \n #TODO: Implement rate limits ...\n \n \n #The params get passed directly\n r = self.session.get(url,params=params,headers=headers) \n \n\n #Update limits\n #--------------------- \n headers = r.headers\n self.rate_limit = headers.get('X-Rate-Limit-Limit',50)\n self.rate_limit_interval = int(headers.get('X-Rate-Limit-Interval','1s')[:-1])\n \n #TODO: Implement ...https://konghq.com/blog/how-to-design-a-scalable-rate-limiting-algorithm/\n \n\n #These are debug only and should not be used for anything else\n #-------------------------------------------------------------\n self.last_url = url\n self.last_response = r \n self.last_params = params \n \n if r.status_code == 404:\n #This typically happens when the DOI is invalid\n #TODO: Make this a named exception\n raise errors.RequestError(r.text)\n \n json_data = r.json()\n if json_data['status'] == 'failed':\n self.last_error = json_data\n raise errors.CrossrefAPIError(json_data['message'])\n \n #Example error \n \"\"\"\n {'status': 'failed', 
'message-type': 'validation-failure', \n 'message': [{'value': 'sample', \n 'message': 'This route does not support sample', 'type': 'parameter-not-allowed'}]}\n \"\"\" \n \n #TODO: return_type\n if return_type == 'json' or object_fh is None:\n return json_data\n else:\n return object_fh(json_data,self)", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def http_get(config_args):\n #global config_args\n \n \n try:\n #r = requests.get(config_args.address,headers=headers,verify=config_args.cacert)\n if 'https' in config_args.address.lower() and config_args.host != None:\n headers = {'host': config_args.host}\n r = requests.get(config_args.address,headers=headers,verify=False)\n #print(config_args.address.lower())\n elif 'https' in config_args.address.lower() and config_args.host == None:\n r = requests.get(config_args.address,verify=False)\n print(config_args.address.lower())\n else:\n r = requests.get(config_args.address)\n\n if r.status_code != 200 and r.status_code != 301 and r.status_code != 302:\n print(\"Request failed...code: {}\".format(r.status_code))\n except KeyboardInterrupt:\n print ('Stopping now....')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0) \n \n return r", "def get_response(request_url):\n return requests.get(request_url)", "def reddit_request(url, params):\n payload = params\n keys=payload.keys()\n if 'user-agent' not in keys:\n payload['user-agent'] = 'downtime'\n else:\n pass\n return requests.get(url, params=payload)", "def _get(self, request_obj):\n return self._execute_action(request_obj, 'GET')", "def fetch_requests(v1):\n \n #check if user has any requests\n if len(all_requests) < 1:\n return jsonify({\n \"message\":\"You have not made any requests yet\"\n })\n \n #if user has more than one request\n if len(all_requests) >= 1:\n return jsonify({\n \"message\":\"Successfully fetched requests\",\n \"requests\":[\n a_request.__dict__ for a_request in all_requests\n ]\n })\n return jsonify({\"message\":\"Can not fetch requests now\"})", "def do_GET(self):\n self._try_to_process_request(self._handle_get_request)", "def basic_requests(url):\r\n # get requests\r\n response = requests.get(base_url+'/get')\r\n print(response.status_code)\r\n\r\n time.sleep(3)\r\n print(\"Sleep 3 seconds\")\r\n # post requests\r\n response = requests.post(base_url+'/post')\r\n print(response.status_code)\r\n\r\n time.sleep(2)\r\n print(\"Sleep 3 seconds\")\r\n response = requests.put(base_url + '/put')\r\n print(response.status_code)\r\n\r\n time.sleep(2)\r\n print(\"Sleep 3 seconds\")\r\n response = requests.delete(base_url + '/delete')\r\n print(response.status_code)", "def _req_get(self, url: str):\n self._get_cookies()\n if not self._cookies:\n return\n r = reqtry.get(url, cookies=self._cookies, allow_redirects=False, timeout=(3, 3), tries=3, delay=1,\n backoff=1.5, jitter=(1, 1.5))\n assert r.status_code == 200, f\"Get request: Invalid http status code: {r.status_code}\"\n return r", "def call_api(url):\n\n req = requests.get(url)\n return req", "def call_api(url):\n\n req = requests.get(url)\n return req", "def base_request(url_path):\n response = requests.get(settings.URL_API + url_path)\n if response.status_code != 200:\n return response\n else:\n return response.json()", "def access_url(context, url):\n context.response = requests.get(context.coreapi_url + url)", "def _Get(self, url, timeout_seconds, headers): # pylint: disable=W0613, R0201\n raise NotImplementedError() # pragma: no cover", "def do_GET(s):\r\n 
print \"processing get request...\"\r\n s.send_response(200)\r\n s.send_header(\"Content-type\", \"application/json\")\r\n s.send_header(\"Access-Control-Allow-Origin\", \"*\")\r\n s.end_headers()\r\n\r\n inputUrl = s.path\r\n\r\n print \"####INPUT########\"\r\n print inputUrl\r\n print \"#####INPUT#######\"\r\n\r\n try:\r\n index = string.index(inputUrl, \"url=\") + 4\r\n urlParam = inputUrl[index:]\r\n print \"url request: \" + urlParam\r\n response = urllib.urlopen(urlParam).read()\r\n s.wfile.write(response)\r\n except:\r\n print \"no url specified\"\r\n\r\n try:\r\n index = string.index(inputUrl, \"random\") + 6\r\n randomParam = inputUrl[index:]\r\n scientists = open(\"C:\\Users\\ASUS 1\\Documents\\GitHub\\girlsWhoCode\\womenScientists\\women_scientists.txt\", 'r').readlines()\r\n size = len(scientists)\r\n fileIndex = random.randint(0, size)\r\n scientist = scientists[fileIndex]\r\n print \"random request returning: \" + scientist\r\n response = json.dumps(scientist)\r\n s.wfile.write(response)\r\n\r\n except Exception as e:\r\n print e.message\r\n print \"no female scientist requested\"\r\n\r\n try:\r\n index = string.index(inputUrl, \"wiki=\") + 5\r\n scientistParam = inputUrl[index:]\r\n scientist = ' '.join(scientistParam.split(\"%20\"))\r\n print \"wiki request for \" + scientist\r\n page = wikipedia.page(scientist)\r\n summary = page.summary\r\n response = json.dumps(summary)\r\n s.wfile.write(response)\r\n except:\r\n print \"no wiki request\"", "def mock_requests_get(self, mocker):\n mock = mocker.patch(\"requests.get\")\n mock.return_value.__enter__.return_value.json.return_value = {\n \"item\": {\n \"icon\": \"\",\n \"icon_large\": \"\",\n \"id\": 21787,\n \"type\": \"Miscellaneous\",\n \"typeIcon\": \"\",\n \"name\": \"Steadfast boots\",\n \"description\": \"A pair of powerful-looking boots.\",\n \"current\": {\"trend\": \"neutral\", \"price\": \"5.9m\"},\n \"today\": {\"trend\": \"negative\", \"price\": \"- 138.2k\"},\n \"members\": \"true\",\n \"day30\": {\"trend\": \"positive\", \"change\": \"+0.0%\"},\n \"day90\": {\"trend\": \"negative\", \"change\": \"-3.0%\"},\n \"day180\": {\"trend\": \"negative\", \"change\": \"-4.0%\"},\n }\n }", "def __call__(self, request):\n if request.host.startswith('127.0.0.1:'):\n return self.real_http_fetch(request)\n\n response = self.http_archive.get(request)\n\n if self.use_closest_match and not response:\n closest_request = self.http_archive.find_closest_request(\n request, use_path=True)\n if closest_request:\n response = self.http_archive.get(closest_request)\n if response:\n logging.info('Request not found: %s\\nUsing closest match: %s',\n request, closest_request)\n\n if not response:\n reason = str(request)\n if self.use_diff_on_unknown_requests:\n diff = self.http_archive.diff(request)\n if diff:\n reason += (\n \"\\nNearest request diff \"\n \"('-' for archived request, '+' for current request):\\n%s\" % diff)\n logging.warning('Could not replay: %s', reason)\n else:\n if self.inject_script:\n response = _InjectScripts(response, self.inject_script)\n if self.scramble_images:\n response = _ScrambleImages(response)\n return response", "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def _get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def test_client_can_load_client_requests_directly(self):\n\n req = self.httpbin.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin.client['get_my_ip'])\n req = self.httpbin.get_request_data('get_my_headers')\n 
self.assertEqual(req, self.httpbin.client['get_my_headers'])\n\n req = self.httpbin_2.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_2.client['get_my_ip'])\n req = self.httpbin_2.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin_2.client['get_my_headers'])", "def process_request(self, req, resp, resource, params):", "def _request(self, *args, **kwargs):\n raise NotImplementedError()", "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json", "def _make_http_request_read(self, path):\n url = self.url_base + path\n if url not in self._requests_cache:\n self._requests_cache[url] = self._perform_http_request(url)[2]\n return self._requests_cache[url]", "def get(self, url: str) -> None:\n\n if self.number_of_requests_made % self.change_ip_after == 0:\n self.renew_ip()\n\n else:\n self.wait()\n\n self.last_call_timestamp = round(time.time(), 2)\n super().get(url)\n\n self.number_of_requests_made += 1", "def _process_request(self, request, response):\n ...", "def mock_requests_get(self, mocker):\n mock = mocker.patch(\"requests.get\")\n mock.return_value.__enter__.return_value.json.return_value = {\n \"types\": [],\n \"alpha\": [\n {\"letter\": \"#\", \"items\": 0},\n {\"letter\": \"a\", \"items\": 4},\n {\"letter\": \"j\", \"items\": 2},\n ],\n }", "def _do_request(self, url: str):\n\n self.debug.ok('method', self.method)\n\n if self.client.fake_response_path:\n with open(self.client.fake_response_path, 'r') as f:\n return constants.ResponseCode.OK, f.read()\n\n elif self.method == constants.RequestConst.GET:\n response = requests.get(\n url, headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok(\n constants.RequestConst.QUERY_PARAMETERS,\n self.parameters[constants.RequestConst.QUERY]\n )\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n elif self.method in [\n constants.RequestConst.POST,\n constants.RequestConst.PUT,\n constants.RequestConst.DELETE\n ]:\n if self.method == constants.RequestConst.POST:\n send_request = requests.post\n elif self.method == constants.RequestConst.PUT:\n send_request = requests.put\n elif self.method == constants.RequestConst.DELETE:\n send_request = requests.delete\n\n response = send_request(\n url, json=self.parameters[constants.RequestConst.QUERY],\n headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok('payload', self.parameters[\n constants.RequestConst.QUERY\n ])\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n else:\n return constants.ResponseCode.NOT_FOUND, {}", "def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()", "def access_gemini_url_patch_method(context, endpoint):\n url = urljoin(context.gemini_api_url, endpoint)\n context.response = requests.patch(url)", "def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)", "def _make_request(self, additional_params: dict, new_base_url: str = None) -> list:\n if new_base_url:\n url: str = new_base_url\n params: dict = additional_params\n else:\n url: str = self.base_url\n params: dict = dict(self.base_params, **additional_params)\n response: requests.Response = 
self.session.get(url, params=params)\n if response.status_code == 200:\n try:\n response_text = json.loads(response.text)\n except ValueError:\n response_text = response.text\n else:\n raise ALHTTPExceptionFromResponse(response)\n\n # sometimes we get a pure dictionary back, let's wrap it in a list for consistency\n if isinstance(response_text, dict):\n response_text = [response_text]\n return response_text", "def test_get_request_normal_response(self, mock_get):\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)", "def _get(self, url: str) -> requests.Response:\n # todo: do some error checking here\n if url.startswith(API_PATH['base']):\n try:\n # logger.debug(f\"RestClient._get(): {url}\") # log in calling function\n response = requests.get(url, auth=self.auth)\n rest_code = response.json()['meta']['code']\n if rest_code not in [200, 201, 204]:\n raise RestException(f\"REST API Error: {rest_code}. {response.content}\")\n except RestException as e:\n logger.error(e)\n return None\n return response\n else:\n raise ValueError(f\"URL is invalid: {url}\")", "async def _make_request(self, url: str, params, server_id: str):\n headers = {\n 'X-Response-Control': 'minified',\n 'User-Agent': 'Friendly Red bot'\n }\n\n if server_id in self.config:\n if 'API_TOKEN' in self.config[server_id]:\n headers['X-Auth-Token'] = self.config['API_TOKEN']\n else:\n await self.bot.say(box('Requests made without an authentication token are limited to 100 requests per 24 hours.\\nYou can request a key by registering at http://api.football-data.org and setting it via [p]football tokenset.'))\n\n async with aiohttp.get(url, headers=headers, params=params) as r:\n if r.status == 200:\n data = await r.json()\n return data\n elif r.status == 400:\n await self.bot.say(box('Bad Request [400]:\\nYour request was malformed most likely the value of a Filter was not set according to the Data Type that is expected.'))\n return\n elif r.status == 403:\n await self.bot.say(box('Restricted Resource [403]:\\nYou tried to access a resource that exists, but is not available for you. 
This can be out of the following reasons:\\n- the resource is only available to authenticated clients\\n- the resource is only available to donating clients\\n- the resource is not available in the API version you are using'))\n return\n elif r.status == 404:\n await self.bot.say(box('Not found [404]\\nYou tried to access a resource that doesn’t exist.'))\n return\n elif r.status == 429:\n await self.bot.say(box('Too many requests [429]\\nYou exceeded your allowed requests per minute/day depending on API version and your user status.\\nSee http://api.football-data.org/docs/v1/index.html#_request_throttling for more information.'))\n await self.bot.say(box('Requests reset in ' + r.headers['X-RequestCounter-Reset'] + ' seconds.'))\n return\n else:\n await self.bot.say(box('Pancake has no idea what you\\'ve done, seriously.'))\n await self.bot.say(box(r.status + '\\n' + r.json()['error']))\n return", "def get(self, url):\n super().get(mfacebookToBasic(url))", "def request(self, method, url, params=None, data=None):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def _request(self, url, method=\"GET\", data=None):\n\t\t# TODO: exception handling\n\t\tif self.logged_in:\n\t\t\tcookie_list = [\"{}={}\".format(k, v) for k, v in self.cookies.iteritems()]\n\t\t\theaders = {'Cookie': \"; \".join(cookie_list)}\n\t\telse:\n\t\t\theaders = {}\n\t\tself.connection.request(method, self.base_path + url, data, headers)\n\t\treturn self.connection.getresponse()", "def handle_request_get(self, msg):\n\n\t\tfor arg in msg.arguments:\n\t\t\tmethod = {\n\t\t\t\t'ucr': self.handle_request_get_ucr,\n\t\t\t\t'meta': self.handle_request_get_meta,\n\t\t\t\t'info': self.handle_request_get_info,\n\t\t\t\t'modules/list': self.handle_request_get_modules,\n\t\t\t\t'modules': self.handle_request_get_modules,\n\t\t\t\t'categories/list': self.handle_request_get_categories,\n\t\t\t\t'categories': self.handle_request_get_categories,\n\t\t\t\t'user/preferences': self.handle_request_get_user_preferences,\n\t\t\t\t'hosts/list': self.handle_request_get_hosts,\n\t\t\t\t'hosts': self.handle_request_get_hosts,\n\t\t\t}.get(arg)\n\t\t\tif method:\n\t\t\t\tself.finished(msg.id, method(msg))\n\t\t\t\treturn\n\t\traise NotFound()", "def request(self, method, url):\n\t\ttr = TwitterRequest( method.upper(), url )\n\t\treturn self.get_response( tr )", "def _request(self, method, uri, **kwargs):\n url = self.url + uri\n self.logger.debug(\"Requesting {} on {}\".format(method, url))\n response = requests.request(method, url, verify=self.verify, **kwargs)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n self.logger.error(\"Bad http code {} requesting Clair\".format(response.status_code))\n if response.reason == \"Not Found\":\n raise ResourceNotFoundException(\"Resource not found\")\n raise ClairConnectionError(response)\n return response", "def _request(self, method, *args, **kwargs):\n if not \"headers\" in kwargs:\n kwargs[\"headers\"] = self._headers\n return self._session.request(method, self._url(*args), **kwargs)", "def request(self, method, url, *args, **kwargs):\n full_url = urljoin(self.base_url, url)\n if 'data' in kwargs:\n kwargs['data'] = self._encode_data(kwargs['data'])\n return super(Client, self).request(method, full_url, *args, **kwargs)", "def start_requests(self):\n # This predefined list of URLs is chosen to include all types of\n # inquiries possible in the Austrian parliament in order to provide a\n # suitable testing surface for new functions.\n 
# urls = [\"https://www.parlament.gv.at/PAKT/VHG/XXV/JPR/JPR_00019/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/JPR/JPR_00016/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/J/J_06954/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/M/M_00178/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/JEU/JEU_00003/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XXV/J/J_06758/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/BR/J-BR/J-BR_03089/index.shtml\",\n # \"https://www.parlament.gv.at/PAKT/VHG/BR/J-BR/J-BR_03091/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/BR/J-BR/J-BR_01155/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_06110/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_06651/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_04024/index.shtml\", \"http://www.parlament.gv.at/PAKT/VHG/XX/J/J_04025/index.shtml\", \"https://www.parlament.gv.at/PAKT/VHG/XX/M/M_00178/index.shtml\"]\n urls = [] if not self.url_override else [self.url_override]\n\n if self.LLP and not self.url_override:\n for i in self.LLP:\n for nrbr in ['NR', 'BR']:\n roman_numeral = roman.toRoman(i)\n options = self.URLOPTIONS.copy()\n options['GP'] = roman_numeral\n options['NRBR'] = nrbr\n url_options = urlencode(options)\n url_llp = \"{}?{}\".format(self.BASE_URL, url_options)\n rss = feedparser.parse(url_llp)\n\n self.logger.info(\"GP {}: {} inquiries from {}\".format(\n roman_numeral, len(rss['entries']), nrbr)\n )\n urls = urls + [entry['link'] for entry in rss['entries']]\n self.TOTAL_COUNTER = len(urls)\n for url in urls:\n yield self.make_requests_from_url(url)", "def _get_request(url, params):\n request = requests.get(url, params=params)\n\n return request", "def challenge_get(self, environ, start_response):\n environ['wsgiorg.routing_args'][1]['recipe_name'] = self.CHALLENGER_RECIPE\n environ['tiddlyweb.type'] = 'text/x-tiddlywiki'\n return get_tiddlers(environ, start_response)", "def _request(self, url):\n response = requests.get(url, headers=self.header)\n\n if str(response.status_code).startswith('2'):\n return response\n\n raise Exception(\"URI request returned an error. 
Error Code \" + str(response.status_code))", "def test_client_load_pages_request(self):\n is_present = hasattr(self.httpbin_3, 'test_requests_patch_method')\n\n self.assertTrue(is_present)", "def __patch(self, url):\n\n res = requests.patch(url, headers=self.auth_header)\n res.raise_for_status()\n return res", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return super().get(request, args, kwargs)", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return super().get(request, args, kwargs)", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return super().get(request, args, kwargs)", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return super().get(request, args, kwargs)", "def _request(self, method, url, params=None, data=None, request_type=PRIVATE, headers={}):\n self._is_valid_request_option(request_type=request_type)\n\n request_headers = copy.deepcopy(self.BASE_HEADERS)\n request_headers.update(headers)\n\n response = getattr(requests, method.lower())(\n url,\n headers=request_headers,\n params=params,\n data=data\n )\n\n return self._handle_response(response)", "def patched_request(urlpath):\n def inner(f):\n def wrapped(self, *args, **kwargs):\n cookies = self.client.get(urlpath, verify=False, auth=HTTP_BASIC_AUTH_CREDENTIALS).cookies\n token = cookies.get('csrftoken')\n headers = {\n 'X-CSRFToken': token,\n 'X-Requested-With': 'XMLHttpRequest',\n 'Content-Type': 'application/json'\n }\n f_kwargs = dict(verify=False, auth=HTTP_BASIC_AUTH_CREDENTIALS, headers=headers, cookies=cookies)\n get = partial(self.client.get, **f_kwargs)\n post = partial(self.client.post, **f_kwargs)\n put = partial(self.client.put, **f_kwargs)\n args += (get, post, put)\n return f(self, *args, **kwargs)\n return wrapped\n return inner", "def render_GET(self, request):\r\n # First check if the version is ok\r\n try:\r\n version = request.args['version']\r\n except KeyError:\r\n request.setResponseCode(httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0])\r\n request.setHeader('content-type', 'text/plain; charset=utf-8')\r\n return \"Request is missing parameter: 'version'\"\r\n\r\n if len(version) != 1:\r\n request.setResponseCode(httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0])\r\n request.setHeader('content-type', 'text/plain; charset=utf-8')\r\n return \"Parameter 'version' has to be unique in request.\"\r\n\r\n version = version[0]\r\n\r\n if version < MINIMAL_VERSION:\r\n request.setResponseCode(httpstatus.HTTP_STATUS_CODE_GONE[0])\r\n request.setHeader('content-type', 'text/plain; charset=utf-8')\r\n return ('Client version is insufficient. 
Minimal version is '\r\n \"'{0}'.\".format(MINIMAL_VERSION))\r\n elif version > CURRENT_VERSION:\r\n request.setResponseCode(httpstatus.HTTP_STATUS_CODE_NOT_IMPLEMENTED[0])\r\n request.setHeader('content-type', 'text/plain; charset=utf-8')\r\n return 'Client version is newer than version supported by server.'\r\n\r\n # Version is ok, now the GET request can be processed\r\n # Extract and check the arguments\r\n try:\r\n userID = request.args['userID']\r\n except KeyError:\r\n request.setResponseCode(httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0])\r\n request.setHeader('content-type', 'text/plain; charset=utf-8')\r\n return \"Request is missing parameter: 'userID'\"\r\n\r\n if len(userID) != 1:\r\n request.setResponseCode(httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0])\r\n request.setHeader('content-type', 'text/plain; charset=utf-8')\r\n return \"Parameter 'userID' has to be unique in request.\"\r\n\r\n userID = userID[0]\r\n\r\n # Get the URL of a Robot process\r\n d = self._realm.requestURL(userID)\r\n d.addCallback(self._build_response, version, request)\r\n d.addErrback(self._handle_error, request)\r\n\r\n return NOT_DONE_YET", "def MutateCustomInterests(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _api_call(self, url, response_checker):\n self.request_compare(url)", "def test_client_can_do_get_request(self):\n response = self.httpbin.get_my_headers(headers={'User-agent': 'Fake user agent'})\n self.assertEqual(response.request.method, 'GET')\n self.assertEqual(response.status_code, 200)", "def fetch_a_request(v1, requestid):\n\n #check if user has any requests\n if len(all_requests) < 1:\n return jsonify({\n \"message\":\"You have not made any requests yet\"\n })\n \n #if user has more than one request\n if len(all_requests) >= 1:\n returned_request = []\n for a_request in all_requests:\n if a_request.request_id == int(requestid):\n returned_request.append(a_request)\n return jsonify({\n \"message\": \"Successfully fetched the request\",\n \"request\": returned_request[0].__dict__\n })\n \n return jsonify({\n \"message\":\"Request doesnt exist\"\n })", "def monkey_patch_requests_for_xray():\n wrapt.wrap_function_wrapper(\n \"requests.sessions\", \"Session.send\", xray_requests_send,\n )", "def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'" ]
[ "0.6091576", "0.5723631", "0.5565101", "0.5542613", "0.55409104", "0.55404246", "0.5505588", "0.5505458", "0.54588634", "0.5443315", "0.5433013", "0.5402714", "0.5384366", "0.5381944", "0.5375504", "0.5370188", "0.5344704", "0.5320391", "0.5319966", "0.531785", "0.5312466", "0.53021806", "0.52882236", "0.5266166", "0.52658015", "0.5263842", "0.52623254", "0.5258456", "0.524351", "0.52384543", "0.5231183", "0.5228995", "0.52281874", "0.5227598", "0.5223713", "0.522342", "0.5222416", "0.5217456", "0.52126706", "0.52126706", "0.5212656", "0.5210863", "0.5204232", "0.51968765", "0.5192916", "0.5190968", "0.5180385", "0.5177196", "0.51769125", "0.51769125", "0.517456", "0.5170755", "0.51619613", "0.5152714", "0.5149479", "0.5146963", "0.5139456", "0.5135463", "0.51248443", "0.5117607", "0.5114928", "0.5113999", "0.51059264", "0.5105139", "0.5101366", "0.5083382", "0.50790405", "0.5075156", "0.50737995", "0.5066479", "0.5065204", "0.5044489", "0.50436544", "0.50425375", "0.5041496", "0.5037819", "0.503611", "0.50327253", "0.50323135", "0.50303274", "0.5029045", "0.50242627", "0.50229156", "0.50194085", "0.50181216", "0.501274", "0.50117266", "0.5010329", "0.50080234", "0.50080234", "0.50080234", "0.50080234", "0.5003852", "0.5001169", "0.5000345", "0.49926776", "0.49871254", "0.49862313", "0.49850047", "0.49774465", "0.49715942" ]
0.0
-1
Update the ElasticSearch index every hour.
def update_es_index(): for job in scheduler.get_jobs(): if 'task_type' in job.meta and job.meta['task_type'] == "update_index": scheduler.cancel(job) scheduler.schedule( scheduled_time=datetime.now(), func='haystack.management.commands.update_index.Command().handle()', interval=60 * 60, repeat=None, ) for job in scheduler.get_jobs(): index_job = job if index_job.func_name == 'haystack.management.commands.update_index.Command().handle()': break index_job.meta['task_type'] = "update_index" index_job.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index_later(self):\n return", "def update_time(cls, key):\n key.put()", "def every_hour(self, time, function, args=None, kwargs=None, name=None):\n if args is None:\n args = list()\n if kwargs is None:\n kwargs = dict()\n if name is None:\n name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')\n self.config[name] = {'mode':'every_hour', 'time':time, 'function':function, 'args':args, \n 'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),\n 'time_init':datetime.datetime.now()}\n self.params.tracker_dict[name] = dict()", "def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()", "def periodicUpdate(self):\n try:\n logging.info(f'{self.cn} periodicUpdate = Start')\n isHaz = JsonSettings.parseJson('settings.json','isHazelcast')\n if self.db.isDb():\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n else:\n self.db.initDb()\n self.insertSys()\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n except Exception as e:\n logging.critical(f'{self.cn} Exception: {e}')\n logging.critical(f'{self.cn} StackTrace: \\n', exc_info=1)\n finally:\n logging.info(f'{self.cn} periodicUpdate = End')", "def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)", "def step010():\n logger.logMessage('Begin: Getting candidate documents from elasticsearch')\n\n def limitHour(d):\n thish = d.start_time.tz_localize(tz='UTC')\n nexth = thish + dt.timedelta(hours=1)\n return { 'range': { 'time': {'gte':thish, 'lt':nexth } } }\n \n conn = sql.create_engine(pgurl)\n client = es.Elasticsearch(hostlist)\n dupesDF = pd.read_sql_table('weather_dupes',conn).set_index('time')\n hours =dupesDF.to_period('H').reset_index()['time'].unique()\n ranges = [ limitHour(h) for h in hours ]\n query = { \n '_source': [ 'tsa','time' ],\n 'query': { \n 'bool': { 'should': ranges } \n } \n }\n #logger.logMessage(level='DEBUG',message='Query body: {0}'.format(query))\n hits = eshelp.scan(client=client,index=indexName,doc_type='doc',query=query)\n numRecs = 0\n with open(candidatesFile,'w') as f:\n for h in hits:\n src = h['_source']\n tsa = int(src['tsa'])\n time = src['time']\n docid = h['_id']\n idx = h['_index']\n f.write(f'{tsa:014d};{time:25s};{docid:32s};{idx:32s}\\n') \n numRecs += 1\n if numRecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records written\".format(numRecs))\n logger.logMessage(message=\"{0:9d} total records written\".format(numRecs))\n logger.logMessage('End: Getting candidate documents from elasticsearch')", "def cron_refresh_spacetrack_cache():\n s = SpaceTrackApi()\n updated_tles_str = s.get_all_tles()\n storage.save_tle_cache(updated_tles_str)\n last_updated[0] = int(time.time())\n metadata = {\n 'last_updated': last_updated[0],\n }\n storage.save_metadata(metadata)", "def reindex(self):", "def reindex(self):", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def _update_on_refresh():\n cities = City.query.all()\n\n #Iterates over all cities in the database and updates their value\n for city in cities:\n metric_resp, imperial_resp = _get_open_weather_requests(city.name)\n\n metric_json = metric_resp.json()\n 
imperial_json = imperial_resp.json()\n\n city.temp_celsius = int(metric_json[MAIN][TEMPERATURE])\n city.temp_fahrenheit = int(imperial_json[MAIN][TEMPERATURE])\n db.session.commit()", "def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-alphabay-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"alphabay_listing\",\n body=item\n )", "def update(self, dt):\n pass", "async def afterHoursAutoPurge(self, ctx: Context):", "def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-dreammarket-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"dreammarket_listing\",\n body=item\n )", "async def _timein_refresh(self):\n\t\t\n\t\tawait self.refresh_cache()", "def _update(self, host):\n pass", "def update_news_intime(minutes):\n while True:\n db_update.update()\n time.sleep(60 * minutes)", "def __setitem__(self, url, reslut):\n record = {'result': reslut, 'timestamp': datetime.datetime.utcnow()}\n try:\n self.es.index(index=self.index, doc_type=self.doc_type, id=url, body=record)\n except Exception as e:\n print e\n print url, 'failed'", "def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'", "def refresh_index(self):\n synchronize()\n # TODO: add logger call here\n self._compute_embeddings()", "def refresh():\r\n db.drop_all()\r\n db.create_all()\r\n for time_value in get_datetime_values('Los Angeles', 'pm25'):\r\n record = Record(datetime=str(time_value[0]), value=time_value[1])\r\n db.session.add(record)\r\n db.session.commit()\r\n return render_template('refresh.html')", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def update(self, dt):", "def update(self, dt):", "def reindex(self):\n raise NotImplementedError()", "def addDayHourToURange(self, dayHour, index):\n ur_hist_len = len(self.__data['updateRange'][index]['updateHistory']) - Predictor.rangeHistorySize\n if (ur_hist_len > 0):\n for i in range(ur_hist_len):\n self.__data['updateRange'][index]['updateHistory'].pop(0)\n self.__data['updateRange'][index]['updateHistory'].append(dayHour)", "def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))", "def increment_hourly_total(self, unique, property_id=None, value=1):\n key = (self.user_name, self.bucket_name, \"hourly_event\", self.shard)\n property_id = property_id or _32_BYTE_FILLER\n column_id = \"\".join([\n self.id,\n property_id[0:16],\n pack_hour(),\n property_id[16:32]])\n increment_counter(key, column_id=column_id, value=value)\n if unique:\n key = (\n self.user_name, \n self.bucket_name, \n \"hourly_unique_event\", \n self.shard)\n increment_counter(key, column_id=column_id)", 
"def update_time(self):\n pass # Do nothing", "def finish_hour(self):\n\t\tassert len(self.values) >= 4, 'A fully formed update date is needed.'\n\t\tself.values = self.values[:4]", "def update_time(self):\n try:\n self._thread_pool_executor.submit(self._update_time_fn)\n except:\n self._logger.exception('Exception caught submitting time metrics update task.')", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def update(self, dt):\n\t\tpass", "def update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')", "def app_index_job(cls):\n import time\n s = time.time()\n print('init--redis')\n news = json.dumps(DB.index_news(), ensure_ascii=False)\n mvs = json.dumps(DB.index_mvs('mv'), ensure_ascii=False)\n dsjs = json.dumps(DB.index_mvs('dsj'), ensure_ascii=False)\n dms = json.dumps(DB.index_mvs('dm'), ensure_ascii=False)\n zys = json.dumps(DB.index_mvs('zy'), ensure_ascii=False)\n mv_top = json.dumps(DB.index_tops('mv')[0:6], ensure_ascii=False)\n dsj_top = json.dumps(DB.index_tops('dsj')[0:6], ensure_ascii=False)\n zy_top = json.dumps(DB.index_tops('zy')[0:6], ensure_ascii=False)\n dm_top = json.dumps(DB.index_tops('dm')[0:6], ensure_ascii=False)\n # 今日更新和总视频数量\n today, total = DB.today_total(None)\n # 淘宝广告\n ads = json.dumps(TBApi.get_tb_goods(), ensure_ascii=False)\n cls.r.set('news', news)\n cls.r.set('mvs', mvs)\n cls.r.set('dsjs', dsjs)\n cls.r.set('dms', dms)\n cls.r.set('zys', zys)\n cls.r.set('mv_top', mv_top)\n cls.r.set('dsj_top', dsj_top)\n cls.r.set('zy_top', zy_top)\n cls.r.set('dm_top', dm_top)\n cls.r.set('today', today)\n cls.r.set('total', total)\n cls.r.set('ads', ads)\n del news, mvs, dsjs, dms, zys, mv_top, dsj_top, zy_top, dm_top, ads\n print(f'{time.time() - s}')", "async def send_to_elastic(self, data, index='wallarm'):\n self.es.index(body=data, index=index)\n return print('Sent successfully')", "def refresh():\n DB.drop_all()\n DB.create_all()\n df_meas = open_api.measurements(city='Los Angeles', parameter='pm25', df=True)\n df_meas['date.utc'] = df_meas['date.utc'].astype(str)\n create_DB_records(df_meas)\n DB.session.commit()\n message = 'Data refreshed on: ' + str(datetime.datetime.now())\n over9s = Record.query.filter(Record.value > 9)\n recs = Record.query.filter(Record.id < 20)\n over5s = Record.query.filter(Record.value > 5)\n return render_template('base.html', message=message, over9s=over9s, over5s=over5s, recs=recs)", "def solr_reindex(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n cmd = 'bin/django update_index dasa --batch-size=5000 --remove --verbosity=2'\n run(cmd)", "def reset_file_index_cache() -> None:\n fileindex_cache_five_minutes.invalidate()", "def hour(self) -> Index:\n warnings.warn(\n \"`hour` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.hour)", "def updateDateIndex(map, accidente):\n occurreddate = accidente['Start_Time']\n accidentedate = datetime.datetime.strptime(occurreddate, '%Y-%m-%d %H:%M:%S')\n entry = om.get(map, accidentedate.date())\n if entry is None:\n datentry = newDataEntry(accidente)\n om.put(map, accidentedate.date(), datentry)\n else:\n datentry = me.getValue(entry)\n addDateIndex(datentry, accidente)\n return map", "def update(self, es, **kwargs):\n pass", "def update(self, wiki):\n self.initialize_index()\n last_rev = 
self.get_last_revision()\n if last_rev == -1:\n changed = self.storage.all_pages()\n else:\n changed = self.storage.changed_since(last_rev)\n changed = list(changed)\n # six.print_('changed', changed, last_rev)\n if changed:\n self.reindex(wiki, changed)\n # if self.INDEX_THREAD and self.INDEX_THREAD.is_alive:\n # print 'alreading reindexing'\n # else:\n # self.INDEX_THREAD = threading.Thread(target=self.reindex, args=(wiki, changed))\n # self.INDEX_THREAD.daemon = True\n # self.INDEX_THREAD.start()", "def refresh_index_patterns(cluster, patterns):\n _refresh_path = '/es_admin/.kibana/index-pattern/'\n _headers = {'kbn-xsrf': 'anything'}\n kbn_config = cluster['kibana']\n kbn_auth = cluster['kibana']['auth']\n kbn_url = f'{kbn_config[\"protocol\"]}://{kbn_config[\"url\"]}:{kbn_config[\"port\"]}'\n\n for pattern, timefield in patterns.items():\n quoted_pattern = quote_plus(pattern, safe='*')\n url = f'{kbn_url}{_refresh_path}{quoted_pattern}/'\n\n # if time series pattern, add timefield\n payload = {'title': pattern, 'notExpandable': True}\n if timefield:\n payload['timeFieldName'] = timefield\n\n rv = requests.post(url, json=payload, headers=_headers, auth=kbn_auth)\n log.debug('sent_refresh', pattern=pattern, payload=payload,\n url=url, response={**rv.__dict__})", "def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def update_timeindex(self, day):\n self.current_positions['datetime'] = self.bars.date\n self.all_positions.append(self.current_positions.copy())\n\n self.current_prices['datetime'] = self.bars.date\n self.all_prices.append(self.current_prices.copy())\n\n self.current_holdings['datetime'] = self.bars.date\n self.all += (self.buy + self.sell)\n self.current_holdings['buy_times'] = self.buy\n self.current_holdings['sell_times'] = self.sell\n self.current_holdings['total_times'] = self.all\n\n hold = 0\n self.current_holdings['total'] = self.current_holdings['cash']\n for s in self.symbol_list:\n # Approximates the real value\n if self.current_positions[s] > 0:\n hold += 1\n market_value = self.current_positions[s] * \\\n self.bars.get_latest_bar_value(s, 'True_close')\n self.current_holdings['total'] += market_value\n\n self.current_holdings['hold'] = hold\n # Append the current holdings\n self.all_holdings.append(self.current_holdings.copy())\n\n print 'Day %s[%s]: buy=%s, sell=%s, hold=%s.' % \\\n (day, self.bars.date, self.buy, self.sell, hold)", "def _update_index(self):\n start_time = datetime.datetime.now()\n sys.stdout.write(\"Updating index. Depending on the size of your music \"\n \"collection this may take some time, so please be patient. 
\"\n \"(Update started at %s)\\n\" % start_time)\n new_index_file = \"%s/music_index_%s.txt\" % (self.index_dir,\n start_time.strftime(\"%Y%m%d_%H%M%S\"))\n files = (os.path.join(tup[0], f) for d in self.music_dirs \n for tup in os.walk(d) \n for f in tup[2] )\n \n with open(new_index_file, \"w\") as fh:\n for filename in files:\n fh.write(\"%s\\n\" % filename)\n \n end_time = datetime.datetime.now()\n sys.stdout.write(\"Music index updated (created index file '%s')\\n\" \n \"Update duration:%s\\n\" % \n (new_index_file, end_time - start_time))", "def commit(self):\n with self.lock:\n try:\n action_buffer = self.BulkBuffer.get_buffer()\n if action_buffer:\n successes, errors = bulk(self.elastic, action_buffer)\n except Exception as e:\n # Handle errors from bulk indexing request\n raise\n \n retry_until_ok(self.elastic.indices.refresh, index=\"\")", "def run_indexa_update_currency(self):\n records = self.search([('indexa_currency_next_execution_date', '<=', fields.Date.today())])\n if records:\n to_update = self.env['res.company']\n for record in records:\n if record.indexa_currency_interval_unit == 'daily':\n next_update = relativedelta(days=+1)\n elif record.indexa_currency_interval_unit == 'weekly':\n next_update = relativedelta(weeks=+1)\n elif record.indexa_currency_interval_unit == 'monthly':\n next_update = relativedelta(months=+1)\n else:\n record.indexa_currency_next_execution_date = False\n continue\n record.indexa_currency_next_execution_date = datetime.date.today() + next_update\n to_update += record\n to_update.indexa_update_currency_rates()", "def update_index(signum):\n cdx = redis_cli.zrange('ipfs:cdxj', 0, -1)\n cdx = ''.join(cdx)\n buff = BytesIO(cdx)\n\n # Add New Index\n res = ipfs_api.add(CustomNameStream(buff, 'index.cdxj'))\n print('Updating Index: ' + str(res))\n\n # Register with IPNS\n res = ipfs_api.name_publish(res['Hash'])\n print res", "def _idx_changed(self, idx):\n self.refresh_memory()", "def _Dynamic_UpdateIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"UpdateIndex\", request_id)\n return", "def increment_hourly_path(\n self, \n event_id, \n unique, \n property_id=None, \n value=1):\n key = (self.user_name, self.bucket_name, \"hourly_path\", self.shard)\n property_id = property_id or _32_BYTE_FILLER\n column_id = \"\".join([\n self.id,\n property_id[0:16],\n pack_hour(),\n property_id[16:32],\n event_id])\n increment_counter(key, column_id=column_id, value=value)\n if unique:\n key = (\n self.user_name, \n self.bucket_name, \n \"hourly_unique_path\", \n self.shard)\n increment_counter(key, column_id=column_id)", "def update(self, dt):\n\n self.collecting(dt)", "def index_item(self, item):\n index_name = self.settings['ELASTICSEARCH_INDEX']\n index_suffix_format = self.settings.get(\n 'ELASTICSEARCH_INDEX_DATE_FORMAT', None)\n\n if index_suffix_format:\n index_name += \"-\" + datetime.strftime(datetime.now(),\n index_suffix_format)\n\n if isinstance(item, DocumentItem):\n index_action = {\n '_index': index_name,\n '_type': self.settings['ELASTICSEARCH_TYPE'],\n '_id': hashlib.sha1(item['url']).hexdigest(),\n '_source': dict(item)\n }\n elif isinstance(item, LinkItem):\n index_action = {\n \"_op_type\": \"update\",\n \"_index\": index_name,\n \"_type\": self.settings['ELASTICSEARCH_TYPE'],\n \"_id\": hashlib.sha1(item['target']).hexdigest(),\n \"script\": {\n \"inline\": \"\"\"ctx._source.anchors = ctx._source.anchors ?\n (ctx._source.anchors + [anchor]).unique{it}\n : [anchor]\"\"\",\n \"params\" : {\n \"anchor\" : 
item[\"anchor\"]\n }\n },\n \"upsert\": {\n \"anchors\": [item[\"anchor\"]],\n \"url\": item['target'],\n \"domain\": urlparse(item['target']).hostname,\n \"updated_on\": datetime.now().strftime(\n \"%Y-%m-%dT%H:%M:%S\")\n }\n }\n elif isinstance(item, AuthorityItem):\n index_action = {\n \"_op_type\": \"update\",\n \"_index\": index_name,\n \"_type\": self.settings['ELASTICSEARCH_TYPE'],\n \"_id\": item['url'],\n \"doc\": {\n \"authority\": item['score']\n }\n }\n else:\n return\n\n self.items_buffer.append(index_action)\n\n if len(self.items_buffer) >= \\\n self.settings.get('ELASTICSEARCH_BUFFER_LENGTH', 500):\n self.send_items()\n self.items_buffer = []", "def every_day(self, time, function, args=None, kwargs=None, name=None):\n if args is None:\n args = list()\n if kwargs is None:\n kwargs = dict()\n if name is None:\n name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')\n self.config[name] = {'mode':'every_day', 'time':time, 'function':function, 'args':args, \n 'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),\n 'time_init':datetime.datetime.now()}\n self.params.tracker_dict[name] = dict()", "def enable_index_update_feature(settings):\n settings.FEATURES[INDEX_UPDATES] = True", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))", "def new_entry_update(cls, summary):\n totaltimes = [x.totaltime for x in summary.entries]\n total = sum(totaltimes, timedelta())\n average = total / len(totaltimes)\n summary.total_time = total\n summary.daily_average = average", "def updateDateIndex(map, accident):\n occurreddate = accident['Start_Time']\n accidentdate = datetime.datetime.strptime(occurreddate, '%Y-%m-%d %H:%M:%S')\n entry = om.get(map, accidentdate.date())\n if entry is None:\n datentry = newDataEntry2(accident)\n om.put(map, accidentdate.date(), datentry)\n else:\n datentry = me.getValue(entry)\n addDateIndex(datentry, accident)\n\n return map", "def step(self):\n self.update(Options['update interval'])", "def update(self, record):\n record[self.UPDATED_AT] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n super(self.__class__, self).update(record)", "def report_hour_distribution(self):\n self.histogram_granularities.add(histogram_granularity.HOUR)\n return self", "def update_stats(self, idx, key):\n\n stats = self.stats\n if not stats.has_key(idx):\n stats[idx] = {}\n if stats[idx].has_key(key):\n stats[idx][key] += 1\n else:\n stats[idx][key] = 1", "def addDayHour(self, dayHour):\n for i, uRange in enumerate(self.__data['updateRange']):\n if (Predictor.inURange(dayHour, uRange)): \n self.addDayHourToURange(dayHour, i)\n return True\n return False", "def update(self):\n if self.last_update and (\n self.last_update + timedelta(hours=1)\n > datetime.utcnow().replace(tzinfo=dt_util.UTC)\n ):\n return # Not time to update yet; data is only hourly\n\n for row in self.current_observations():\n if row.get(\"Station\") == self._station_id:\n api_fields = {\n col_heading: (standard_name, dtype)\n for standard_name, (\n _,\n _,\n _,\n col_heading,\n dtype,\n ) in SENSOR_TYPES.items()\n }\n self.data = {\n api_fields.get(col_heading)[0]: api_fields.get(col_heading)[1](\n v.replace(\",\", \".\")\n )\n for col_heading, v in row.items()\n if col_heading in api_fields and v\n }\n break\n else:\n raise ValueError(f\"No weather data for station {self._station_id}\")", "def updateDateIndex(map, accident):\n occurreddate = 
accident['End_Time']\n accidentdate = datetime.datetime.strptime(occurreddate, '%Y-%m-%d %H:%M:%S')\n entry = om.get(map, accidentdate.date())\n if entry is None:\n datentry = newDataEntry(accident)\n om.put(map, accidentdate.date(), datentry)\n else:\n datentry = me.getValue(entry)\n addDateIndex(datentry, accident)\n return map", "def get_hourly(self):\n pass", "def update():", "def update():", "def build_index(self):\n self.rebuild_index()", "def set_dt_index(self, dt_0=None):\r\n self.set_datetime(dt_0)\r\n dt = pd.to_timedelta(self.data['t'], unit='sec') + self.dt_0\r\n self.data.index = dt\r\n self.data.index.name = 'dt'", "def update(self) -> None:\n ...", "def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )", "def refresh():\n DB.drop_all()\n DB.create_all()\n samples = pull_pm('Los Angeles', 'pm25')\n for sample in samples:\n measure = Record(datetime = str(sample[0]), value = sample[1])\n DB.session.add(measure)\n DB.session.commit()\n return 'Data refreshed!'", "def task_refresh_all_stats_score(request):\n start = time.time()\n cls_name = request.POST.get('cls') or 'Day'\n destroy = int(request.POST.get('destroy', '0'))\n cursor = datastore_query.Cursor(urlsafe=request.POST.get('cursor'))\n task_count = int(request.POST.get('task_count', '0'))\n assert cls_name in ('Day', 'Multi'), cls_name\n cls = (\n models.AccountStatsDay\n if cls_name == 'Day' else models.AccountStatsMulti)\n\n # Task queues are given 10 minutes. Do it in 9 minutes chunks to protect\n # against most timeout conditions.\n timeout = 540\n updated = 0\n skipped = 0\n try:\n futures = []\n chunk_size = 10\n items = []\n more = True\n if destroy:\n options = ndb.QueryOptions(keys_only=True)\n else:\n options = ndb.QueryOptions()\n while more:\n batch, cursor, more = cls.query(default_options=options).fetch_page(\n 20, start_cursor=cursor)\n if destroy:\n futures.extend(ndb.delete_multi_async(batch))\n updated += len(batch)\n else:\n for i in batch:\n score = models.compute_score(i)\n if i.score != score:\n items.append(i)\n if len(items) == chunk_size:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n items = []\n futures = [f for f in futures if not f.done()]\n else:\n skipped += 1\n if time.time() - start >= timeout:\n break\n if items:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n ndb.Future.wait_all(futures)\n if not more and cls_name == 'Day':\n # Move to the Multi instances.\n more = True\n cls_name = 'Multi'\n cursor = datastore_query.Cursor()\n if more:\n taskqueue.add(\n url=reverse(task_refresh_all_stats_score),\n params={\n 'cls': cls_name,\n 'cursor': cursor.urlsafe() if cursor else '',\n 'destroy': str(destroy),\n 'task_count': str(task_count+1),\n },\n queue_name='refresh-all-stats-score')\n result = 200\n except (db.Timeout, DeadlineExceededError):\n result = 500\n out = 'Index: %d\\nType = %s\\nStored %d items\\nSkipped %d\\nIn %.1fs\\n' % (\n task_count, cls.__name__, updated, skipped, time.time() - start)\n if result == 200:\n logging.info(out)\n else:\n logging.error(out)\n return HttpTextResponse(out, status=result)", "def update(self): \n \n timeout=0\n \n while(timeout<10):\n try:\n data = pnd.read_csv(self.path,index_col=0,sep=',',names=['Values'])\n self.data['Values']=data['Values'].values\n self.data.Values.values[-1] = data.index.values[-1]\n \n# print('Updated from file:',self.data.iloc[-1].values[0])\n break\n except ValueError:\n timeout+=1\n if timeout 
== 10:\n print('failed to update')", "def _scheduled_update(now):\n _LOGGER.debug(\"%s: executing scheduled update\", self.entity_id)\n self.async_schedule_update_ha_state(True)\n self._update_listener = None", "def update_view_times(app):\n app.logger.info('Scheduler update_view_times running: %s' % post_view_times_counter)\n d = dict(post_view_times_counter)\n post_view_times_counter.clear()\n for k, v in d.items():\n p = Post.find_one({'_id': k})\n if p:\n try:\n p.viewTimes += v\n p.save()\n except:\n app.logger.exception('Failed when updating the viewTime for album %s' % p._id)", "def update( ):\r\n pass", "def increase_scan_interval(hass):\n hue_sensor_base.SensorManager.SCAN_INTERVAL = datetime.timedelta(days=365)", "def refreshTable(self):\n ds = []\n for id in self.protocol.getRefreshIDs():\n node = Node(id)\n nearest = self.protocol.router.findNeighbors(node, self.alpha)\n spider = NodeSpiderCrawl(self.protocol, node, nearest)\n ds.append(spider.find())\n\n def republishKeys(_):\n ds = []\n # Republish keys older than one hour\n for key, value in self.storage.iteritemsOlderThan(3600):\n ds.append(self.set(key, value))\n return defer.gatherResults(ds)\n\n d = defer.gatherResults(ds)\n d.addCallback(republishKeys)\n d.addErrback(self.onError)\n return d", "def _update(self, count=True, forced=False):", "def update_timeindex(self, event):\n latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])\n \n # Update positions\n # ================\n dp = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dp['datetime'] = latest_datetime\n \n for s in self.symbol_list:\n dp[s] = self.current_positions[s]\n \n # Append the current positions\n self.all_positions.append(dp)\n \n # Update holdings\n # ===============\n dh = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dh['datetime'] = latest_datetime\n dh['cash'] = self.current_holdings['cash']\n dh['commission'] = self.current_holdings['commission']\n dh['total'] = self.current_holdings['cash']\n \n for s in self.symbol_list:\n # Approximation to the real value\n market_value = self.current_positions[s] * self.bars.get_latest_bar_value(s, \"adj_close\")\n dh[s] = market_value\n dh['total'] += market_value\n \n # Append the current holdings\n self.all_holdings.append(dh)\n print('timeindex: ', dh)", "def queriesInEachHour(self):\n hours = 0\n\n #prints out each element (with number of DB Queries) of array\n while hours < 24:\n print (hours,'to',hours+1, ' : ', self.arrayOfTimes[hours])\n hours += 1", "def reschedule_active_metadata_update(key, url):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if instance.active_metadata_update.url != url:\n logging.warning(\n 'Instance has unexpected active metadata operation: %s', key)\n return\n\n metadata_updates = [instance.active_metadata_update]\n metadata_updates.extend(instance.pending_metadata_updates)\n instance.active_metadata_update = compress_metadata_updates(metadata_updates)\n instance.pending_metadata_updates = []\n instance.put()", "def update(self, dt):\n self.level.update(self.keys, dt)", "def updateIndex(ix, pool_path):\n \n logger.debug('updating search index')\n writer = ix.writer()\n \n exercise_list = [f.name for f in os.scandir(pool_path) if f.is_dir()]\n for ex in exercise_list:\n if ex == '.search_index':\n continue\n task_file = 
os.path.abspath(os.path.join(pool_path, ex, 'task.tex'))\n if os.path.isfile(task_file):\n logger.info('parsing ' + task_file)\n metaData, task_texcode = parseTaskFile(task_file)\n else:\n logger.warning(ex + ' does not include a task.tex file. skipping entry')\n continue\n \n solution_file = os.path.abspath(os.path.join(pool_path, ex, 'solution.tex'))\n if os.path.isfile(solution_file):\n with open(solution_file, 'r') as f:\n solution_texcode = f.read()\n else:\n logger.warning(ex + ' does not include a solution.tex file')\n solution_texcode = ''\n \n if metaData['date'] == '':\n lastupdate = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n else:\n lastupdate = parse_date(metaData['date'])\n\n writer.add_document(\n folder_name=ex,\n task=task_texcode,\n solution=solution_texcode,\n language=metaData['language'],\n maintainer=metaData['author'],\n lastupdate=lastupdate,\n keywords=re.sub(r',\\s+', ',', metaData['keywords'])\n )\n\n writer.commit()", "def es_index(data):\n doc_type = data.get('service')\n es.index(index=INDEX, doc_type=doc_type, body=data)", "def one_step(self, event):\n self.refresh(self._refresh_rate, True)", "def update_H(self, curl_E):", "def update(self, T, num_updates=1, in_order=False, seed=None):\n self.schedule_update([(T, num_updates)], in_order, seed)", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass" ]
[ "0.59411937", "0.57995015", "0.56473935", "0.5602451", "0.558092", "0.551876", "0.5453459", "0.5422525", "0.54206413", "0.54206413", "0.5394929", "0.5394929", "0.53662395", "0.53635", "0.53479695", "0.53402996", "0.532217", "0.53142494", "0.5309737", "0.5285711", "0.52673167", "0.525103", "0.5217854", "0.5209786", "0.5201052", "0.5197442", "0.5189252", "0.5189252", "0.5187253", "0.51755565", "0.5174961", "0.5164124", "0.5163411", "0.5148543", "0.514248", "0.5121596", "0.5120403", "0.51189524", "0.50978506", "0.50734687", "0.5058177", "0.5056586", "0.504522", "0.50417787", "0.50363314", "0.50325537", "0.501305", "0.49975574", "0.4973511", "0.4957602", "0.49574998", "0.49559468", "0.49520752", "0.49465966", "0.49267802", "0.4926275", "0.4920178", "0.49162498", "0.4907312", "0.49041682", "0.4895208", "0.48940438", "0.48928663", "0.4888886", "0.4883144", "0.48739564", "0.48724464", "0.48682138", "0.4867852", "0.4857211", "0.48531112", "0.48528945", "0.4852077", "0.4852077", "0.48514396", "0.48505488", "0.4838406", "0.48327112", "0.4830933", "0.4823441", "0.48096028", "0.4809106", "0.48055875", "0.48042142", "0.48034", "0.4801502", "0.4793459", "0.47934362", "0.4787651", "0.47824427", "0.47774798", "0.47765908", "0.47748932", "0.47730464", "0.47696146", "0.47676218", "0.47621673", "0.47621673", "0.47621673", "0.47621673" ]
0.6486948
0
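For reference, a minimal standalone sketch of the hourly-reindex pattern encoded in the document field of the row above, assuming rq-scheduler on top of a local Redis. The Redis connection, queue, helper name, and dotted function path below are placeholders, not values taken from this row.

    # Minimal sketch, assuming rq-scheduler backed by a local Redis instance.
    from datetime import datetime

    from redis import Redis
    from rq_scheduler import Scheduler

    scheduler = Scheduler(connection=Redis())

    def schedule_hourly(func_path, task_type='update_index'):
        # Drop any previously scheduled copy so exactly one recurring job exists.
        for job in scheduler.get_jobs():
            if job.meta.get('task_type') == task_type:
                scheduler.cancel(job)
        # First run now, then every hour, repeating indefinitely (repeat=None).
        job = scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=func_path,
            interval=60 * 60,
            repeat=None,
        )
        job.meta['task_type'] = task_type
        job.save()
        return job

Unlike the row's version, this tags the newly created job with its task_type at creation time and returns it, instead of re-scanning scheduler.get_jobs() afterwards to find the job by function name.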
JavaProcess.__init__(self, class_loc, args=[]) Initializes an external Java process.
def __init__(self, config, class_loc, args=[]): JavaProcess.config = JavaProcessConfig.configFrom_dict(config) self._cp = self._construct_classpath_str() self.class_loc = class_loc self.args = args self._process = None self._stdout = None self._stderr = None LOG.debug("JavaProcess constructed for %s", self.class_loc) return
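A hedged, self-contained sketch of how a wrapper like the JavaProcess above presumably launches the JVM once started; the record above only shows __init__, so the helper name, classpath entries, and main class below are hypothetical, not taken from this row.

    # Hypothetical helper: turns a stored classpath, class location, and args
    # into a java subprocess; every concrete value here is a placeholder.
    import os
    import subprocess

    def launch_java(classpath_entries, class_loc, args=()):
        cp = os.pathsep.join(classpath_entries)  # ':' on POSIX, ';' on Windows
        cmd = ['java', '-cp', cp, class_loc, *args]
        # Pipe stdout/stderr so the wrapper can expose them later
        # (cf. the _stdout/_stderr attributes initialized in __init__).
        return subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, text=True)

    # Usage (hypothetical): proc = launch_java(['app.jar'], 'com.example.Main')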
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, paths):\n Process.__init__(self)\n self.paths = paths", "def __init__(self, host=\"\", port=8432):\n Process.__init__(self)\n self.host, self.port = host, port\n self._Handler.annotator = self", "def __init__(self):\n self._recording = None\n self._java_call = get_config_str(\"Java\", \"java_call\")\n result = subprocess.call([self._java_call, '-version'])\n if result != 0:\n raise ConfigurationException(\n f\" {self._java_call} -version failed. \"\n \"Please set [Java] java_call to the absolute path \"\n \"to start java. (in config file)\")\n\n self._find_java_jar()\n\n self._machine_json_path = None\n self._placement_json = None\n self._monitor_cores = None\n self._gatherer_iptags = None\n self._gatherer_cores = None\n self._java_properties = get_config_str(\"Java\", \"java_properties\")\n self._chipxy_by_ethernet = None\n if self._java_properties is not None:\n self._java_properties = self._java_properties.split()\n # pylint: disable=not-an-iterable\n for _property in self._java_properties:\n if _property[:2] != \"-D\":\n raise ConfigurationException(\n \"Java Properties must start with -D \"\n f\"found at {_property}\")", "def __init__(self, process=None, parent=None, **kwargs):\n super(ProcessIO, self).__init__(**kwargs)\n self.process = process\n self.parent = parent\n self.default_output = process.default_output", "def _run_java(self, *args):\n if self._java_properties is None:\n params = [self._java_call, '-jar', self._jar_file]\n else:\n params = [self._java_call] + self._java_properties \\\n + ['-jar', self._jar_file]\n params.extend(args)\n return subprocess.call(params)", "def __init__(self):\n self._process = None\n self._nm = PortScanner()", "def __init__(self, binPath, numProc, wd, platform):\n self.binPath = binPath\n self.numProc = numProc\n self.wd = wd\n self.platform = platform", "def _runner(self, classpath, main, jvm_options, args):", "def __init__(self, task_queue, result_queue):\n multiprocessing.Process.__init__(self)\n self.task_queue = task_queue\n self.result_queue = result_queue", "def __init__(self, args, shell, userns):\n super(BasicMgr, self).__init__(args, shell, userns)\n self.cmd = self._wlbin + args\n\n # Build Popen instance\n try:\n self.p = Popen(self.cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE,)\n except OSError as e:\n if e.errno == errno.ENOENT:\n print(\"Couldn't find program: %r\" % self.cmd[0])\n return\n else:\n raise e", "def __init__(self, stub_class, cmd, port=None):\n self._process_lock = threading.RLock()\n self._process = None\n self._stub_class = stub_class\n self._cmd = [str(arg) for arg in cmd]\n self._port = port", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n self.__pythonExecutable\n )", "def __init__(self, program, args):\n self.__program = program\n self.__args = args", "def _launch(self):\n annotators = ['tokenize', 'ssplit']\n if 'ner' in self.annotators:\n annotators.extend(['pos', 'lemma', 'ner'])\n elif 'lemma' in self.annotators:\n annotators.extend(['pos', 'lemma'])\n elif 'pos' in self.annotators:\n annotators.extend(['pos'])\n annotators = ','.join(annotators)\n options = ','.join(['untokenizable=noneDelete',\n 'invertible=true'])\n # if you work on English, use this this command\n cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',\n annotators, '-tokenize.options', options,\n '-outputFormat', 'json', '-prettyPrint', 'false']\n \n # 
if you work on arabic, use this this command\n \n # cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n # # 'edu.stanford.nlp.pipeline.StanfordCoreNLP','-annotators',\n # 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-props', 'StanfordCoreNLP-arabic.properties','-annotators',\n # annotators, '-tokenize.options', options, #'-tokenize.whitespace', 'true',\n # '-outputFormat', 'json', '-prettyPrint', 'false']\n print(' '.join(cmd))\n\n # We use pexpect to keep the subprocess alive and feed it commands.\n # Because we don't want to get hit by the max terminal buffer size,\n # we turn off canonical input processing to have unlimited bytes.\n self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)\n self.corenlp.setecho(False)\n self.corenlp.sendline('stty -icanon')\n self.corenlp.sendline(' '.join(cmd))\n self.corenlp.delaybeforesend = 0\n self.corenlp.delayafterread = 0\n self.corenlp.expect_exact('NLP>', searchwindowsize=100)", "def __init__(self, *args, **kwargs):\n mp.Process.__init__(self)\n self._args = args\n self._kwargs = kwargs\n self._host_conn, self._proc_conn = mp.Pipe()\n self.daemon = True\n self.start()\n reply = self._host_conn.recv()\n if isinstance(reply, Exception):\n raise reply", "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def _from_java(cls, java_obj):\n # Create a new instance of this stage.\n py_obj = cls()\n py_obj._java_obj = java_obj\n if java_obj is None and java_obj.parentPipeline().isDefined():\n py_parent = MLPipeline()\n py_parent._java_obj = java_obj.parentPipeline().get()\n py_obj._parent = py_parent\n return py_obj", "def __init__(self, target=None, *args, **kwargs):\n super(PyonThread, self).__init__()\n\n if target is not None or not hasattr(self, 'target'): # Allow setting target at class level\n self.target = target\n self.spawn_args = args\n self.spawn_kwargs = kwargs\n\n # The instance of Greenlet or subprocess or similar\n self.proc = None\n self.supervisor = None\n\n self.ev_exit = Event()", "def __init__(self, proc_args: Optional[List[str]]):\n if proc_args:\n self.proc = subprocess.Popen(\n proc_args,\n universal_newlines=True,\n stdin=subprocess.PIPE, # pipe STDIN and STDOUT to send and receive messages\n stdout=subprocess.PIPE\n )\n self.outward_comm_stream = self.proc.stdin\n self.inward_comm_stream = self.proc.stdout\n else:\n self.proc = None\n self.outward_comm_stream = sys.stdout\n self.inward_comm_stream = sys.stdin", "def __init__(self, args, env=None):\n self.args = args\n if env:\n self.env = env\n else:\n self.env = os.environ\n self.stdout = None\n self.stderr = None\n self._process = None", "def __init__(self, program):\r\n self._program = program", "def __init__(self):\n super(MultiProcessEngine, self).__init__()\n self._debug_output = False\n self._name = 'Main'\n self._last_worker_number = 0\n self._log_filename = None\n self._pid = os.getpid()\n self._process_information = process_info.ProcessInfo(self._pid)\n self._process_information_per_pid = {}\n self._processes_per_pid = {}\n self._quiet_mode = False\n self._rpc_clients_per_pid = {}\n self._rpc_errors_per_pid = {}\n self._status_update_active = False\n self._status_update_thread = None\n self._storage_writer = None\n self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT", "def __init__(self, com: AbsCommunicationProcess):\n super().__init__()\n self.__com = com\n self.__is_started = False", "def __init__(self, pid, binary_path, host_name, 
node_name, telemetry):\n self.pid = pid\n self.binary_path = binary_path\n self.host_name = host_name\n self.node_name = node_name\n self.telemetry = telemetry", "def __init__(self, args, shell, userns):\n super(SSHMgr, self).__init__(args, shell, userns)\n parser = MagicArgumentParser()\n parser.add_argument('--host', type=str, default='localhost',\n help='Machine to reach (default = localhost)')\n parser.add_argument('--pid', type=str,\n help='Variable to store SSH process pid')\n _args, cmd = parser.parse_known_args(args)\n self.cmd = self._wlbin + [_args.host, ] + cmd\n # SSH Cannot fork into background without a command to execute.\n # Popen instance is created in submit", "def __init__(self, argv):\n self._argv = argv", "def __init__(self, readhandle):\n # Name our self\n threading.Thread.__init__(self, name=\"ParentProcessChecker\")\n\n # Store the handle\n self.readhandle = readhandle", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def spawn(self, classpath, main, jvm_options=None, args=None, **subprocess_args):\r\n cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args))\r\n return self._spawn(cmd, **subprocess_args)", "def __init__(self):\n self.buienradar_rpc = {\"rain_at\": self.rain_at,\n \"rain_max\": self.rain_max\n }\n\n multiprocessing.Process.__init__(self)\n self.name = 'buienradar'\n self.shutdown = False\n self._sched = None\n self._rain = []", "def __init__(self, class_name='gumtree.GumTreeApi'):\n if Gumtree.gumtree is None:\n # class path\n jvm_arg = \"-Djava.class.path=\" + my_constant.JAVA_CLASS_PATH\n startJVM(getDefaultJVMPath(), '-d64', jvm_arg)\n # initial class and object\n GumtreeApi = JClass(class_name)\n Gumtree.gumtree = GumtreeApi()", "def __init__(self, connected, jlink_exe=None, jlink_path='', params=None):\n self._connected = connected\n # If not provided, pick the appropriate JLinkExe name based on the\n # platform:\n # - Linux = JLinkExe\n # - Mac = JLinkExe\n # - Windows = JLink.exe\n if jlink_exe is None:\n system = platform.system()\n if system == 'Linux':\n jlink_exe = 'JLinkExe'\n elif system == 'Windows':\n jlink_exe = 'JLink.exe'\n elif system == 'Darwin':\n jlink_exe = 'JLinkExe'\n else:\n raise AdaLinkError('Unsupported system: {0}'.format(system))\n # Store the path to the JLinkExe tool so it can later be run.\n self._jlink_path = os.path.join(jlink_path, jlink_exe)\n logger.info('Using path to JLinkExe: {0}'.format(self._jlink_path))\n # Apply command line parameters if specified.\n self._jlink_params = []\n if params is not None:\n self._jlink_params.extend(params.split())\n logger.info('Using parameters to JLinkExe: {0}'.format(params))\n # Make sure we have the J-Link executable in the system path\n self._test_jlinkexe()", "def __init__(self, isParent):\n UTIL.TASK.ProcessingTask.__init__(self, isParent=isParent)", "def _from_java(cls, java_obj):\n # Create a new instance of this stage.\n py_obj = cls()\n py_obj._java_obj = java_obj\n return py_obj", "def __init__(self, program, *args):\n\n for argument in args:\n if not isinstance(argument, str):\n raise TypeError(\"Arguments to exec should be a string\")\n\n reader, writer = os.pipe2(0)\n pid = os.fork()\n\n if pid == 0:\n os.close(writer)\n os.close(1)\n os.close(2)\n os.dup2(reader, 0)\n\n os.execlp(program, program, *args)\n\n sys.exit(1)\n else:\n os.close(reader)\n self.__pipe = writer\n self.__pid = 
pid", "def __init__(self): \n\t\n\t # get the environment\n\t\tself.env = env()", "def __init__(self, classpath, java_executor=None, ivy_settings=None, ivy_cache_dir=None):\r\n\r\n self._classpath = maybe_list(classpath)\r\n\r\n self._java = java_executor or SubprocessExecutor()\r\n if not isinstance(self._java, Executor):\r\n raise ValueError('java_executor must be an Executor instance, given %s of type %s'\r\n % (self._java, type(self._java)))\r\n\r\n self._ivy_settings = ivy_settings\r\n if self._ivy_settings and not isinstance(self._ivy_settings, Compatibility.string):\r\n raise ValueError('ivy_settings must be a string, given %s of type %s'\r\n % (self._ivy_settings, type(self._ivy_settings)))\r\n\r\n self._ivy_cache_dir = ivy_cache_dir\r\n if self._ivy_cache_dir and not isinstance(self._ivy_cache_dir, Compatibility.string):\r\n raise ValueError('ivy_cache_dir must be a string, given %s of type %s'\r\n % (self._ivy_cache_dir, type(self._ivy_cache_dir)))", "def compile_java(self):\n if(self.input == \"\"):\n stderr = subprocess.run(\n [\"javac\", self.id+\".java\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n if(len(stderr) == 0):\n self.status = 1\n stdout = subprocess.run(\n [\"java\"+self.id], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n else:\n self.status = 0\n self.output = stderr\n else:\n pass", "def __init__(self, task, queue, semaphore=None, task_args=None,\n task_kwargs=None):\n multiprocessing.Process.__init__(self)\n self._task = task\n self._queue = queue\n self._semaphore = semaphore\n self._started = multiprocessing.Event()\n self._killing = multiprocessing.Event()\n self._output = None\n self._parent_pid = None\n self._task_args = task_args if task_args else ()\n self._task_kwargs = task_kwargs if task_kwargs else {}", "def __init__(self, task_queue, results_queue, individuals):\n Process.__init__(self)\n \n self.proc_name = self.name\n \n logger.info(\"Setting up variant_annotator: {0}\".format(\n self.proc_name))\n \n logger.debug(\"Setting up task queue\")\n self.task_queue = task_queue\n \n logger.debug(\"Setting up results queue\")\n self.results_queue = results_queue\n\n logger.debug(\"Setting up individuals\")\n self.individuals = individuals\n \n if len(self.individuals) == 1:\n self.models = ['AR_comp', 'AR_comp_dn', 'AD', 'AD_dn']\n else:\n self.models = ['AR_comp', 'AR_comp_dn']", "def __init__(self, process_name=sys.argv[0], transport_factory=transport.TransportUnixFactory()):\n self.factory = transport_factory\n self.server = self.factory.serve()\n self.server.addEndpoint(general.EndpointIntrospect())\n processinfo = general.EndpointProcessInfo()\n processinfo.setProcessName(process_name)\n self.server.addEndpoint(processinfo)\n self.server.addEndpoint(tracing.EndpointTraceMapping())\n self.server.addEndpoint(tracing.EndpointNativeTraceSender())", "def __init__(self, ghidraUrl: java.net.URL):\n ...", "def __init__(\n self, process_name: str = \"Process\", is_ms: bool = False, verbose: bool = True\n ):\n self.verbose = verbose\n self.is_ms = is_ms\n self.process_name = process_name\n self.start = -1\n self.end = -1\n self.elapsed_time_ms = -1", "def __init__(self):\n super(PreProcess, self).__init__()", "def __call__(self, basepath: str, scriptpath: str) -> Process:\n ...", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def __init__(self, master):\n super().__init__()\n self.master = master\n self.proc = None\n self.start()", "def 
__init__(self,cmd=sys.argv[0]):\n\n # Name of the current script's main file without the directory.\n self.name=os.path.basename(sys.argv[0])\n\n # The numeric PID (program ID) of the currently running script.\n self.pid=os.getpid()\n\n # The full, absolute path to the directory given when the current\n # script was run.\n self.dir=os.path.abspath(os.path.dirname(sys.argv[0]))\n\n # Like name and dir, but these follow any symlinks to find the real name.\n # Also, real_dir holds the full, absolute path.\n self.real_dir,self.real_name=os.path.split(os.path.realpath(sys.argv[0]))\n\n # A decent choice of temp file or directory for this program, if\n # needed.\n self.tempdir=self.findMainTempDir()\n self.temp=os.path.join(self.tempdir,'%s.%d'%(self.name,self.pid))\n\n # Get the terminal width and and height, or default to 25x80.\n self.getTerminalSize()", "def __init__(self, *args):\n self.env = os.environ.copy()\n \"\"\"Environment variables (:class:`dict`)\"\"\"\n command = \"modulecmd python \"+' '.join(args)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n retval = p.communicate()\n self._parse(retval)", "def __init__(self, status_in, data_out):\n Process.__init__(self)\n self.input_stream = status_in\n self.data_out = data_out\n self._UPDATE_INTERVAL_MS = 10\n self._status_labels = {} # A dictionary, whose keys are strings and whose values are Tkinter label variables", "def __init__(self, process, thread=None):\n\n self._process = process\n self._thread = thread\n self.__project = None\n self.__sim_procedures_resolved = False\n\n self.load_options = {'auto_load_libs': False}\n self.support_selfmodifying_code = False\n self.use_sim_procedures = True\n self.exclude_sim_procedures_list = []\n self.add_options = set([])\n self.remove_options = set([])\n\n if ANGR_OK:\n try:\n self._process.threads._register_plugin(Angr._thread_plugin, \"angr\")\n except RevengeModulePluginAlreadyRegistered:\n # This will error out if we're already registered\n pass", "def __init__(self, command, **kwds):\n ## default properties\n self.__updater__ = None\n self.__threads__ = weakref.WeakSet()\n self.__kwds = kwds\n\n args = shlex.split(command) if isinstance(command, six.string_types) else command[:]\n command = args.pop(0)\n self.command = command, args[:]\n\n self.eventWorking = Asynchronous.Event()\n self.__taskQueue = Asynchronous.Queue()\n self.__exceptionQueue = Asynchronous.Queue()\n\n self.codec = codecs.lookup(kwds.get('encoding', 'iso8859-1' if sys.getdefaultencoding() == 'ascii' else sys.getdefaultencoding()))\n self.stdout = kwds.pop('stdout')\n self.stderr = kwds.pop('stderr')\n\n ## start the process\n not kwds.get('paused', False) and self.start()", "def __init__(self, args):\n super(SubProcessTransport, self).__init__()\n\n self._args = args\n\n # The value of `._connect_future` and `._proc` determine the state of\n # the connection:\n # - If `_connect_future` is None and `_proc` is None the transport is\n # not connected, and a connection attempt is not in-flight.\n # - If `_connect_future` is not None and `_proc` is None then a\n # connection attempt is in process.\n # - If `_connect_future is None` and `_proc` is not None then a\n # connection is established.\n # - It is invalid for both `_connect_future` and `_proc` to be not\n # None, or for _connect_future to be done (except transiently within\n # callbacks).\n self._connect_future = None\n self._proc = None\n\n # `_wait_future` is not None if and only if `_proc` is not None. 
It is\n # used to reset `_proc` to None when the process terminates.\n self._wait_future = None", "def __init__(\n self,\n script: Dict[str, Any],\n **kwargs: Any,\n ):\n Module.__init__(self, **kwargs)\n\n # store\n self.script = script\n if 'comm' in script.keys():\n copy_comm = False\n else:\n copy_comm = True\n self._script = self.add_child_object(script, Script, configuration={}, copy_comm=copy_comm)\n\n # add thread func\n self.add_background_task(self._run_thread, False)", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n os.environ.get(\n 'KOMBI_PYTHON2_EXECUTABLE',\n 'python2'\n )\n )", "def __init__(self,\n executable,\n host,\n port,\n app_id,\n script=None,\n application_host=None,\n application_port=None,\n application_root=None,\n auto_id_policy=None,\n blobstore_path=None,\n clear_datastore=None,\n clear_prospective_search=None,\n datastore_path=None,\n enable_sendmail=None,\n enable_task_running=None,\n high_replication=None,\n logs_path=None,\n prospective_search_path=None,\n require_indexes=None,\n show_mail_body=None,\n smtp_host=None,\n smtp_password=None,\n smtp_port=None,\n smtp_user=None,\n task_retry_seconds=None,\n trusted=None,\n use_sqlite=None,\n default_gcs_bucket_name=None):\n self._process = None\n self._host = host\n self._port = port\n if script:\n self._args = [executable, script]\n else:\n self._args = [executable]\n self._BindArgument('--api_host', host)\n self._BindArgument('--api_port', port)\n self._BindArgument('--application_host', application_host)\n self._BindArgument('--application_port', application_port)\n self._BindArgument('--application_root', application_root)\n self._BindArgument('--application', app_id)\n self._BindArgument('--auto_id_policy', auto_id_policy)\n self._BindArgument('--blobstore_path', blobstore_path)\n self._BindArgument('--clear_datastore', clear_datastore)\n self._BindArgument('--clear_prospective_search', clear_prospective_search)\n self._BindArgument('--datastore_path', datastore_path)\n self._BindArgument('--enable_sendmail', enable_sendmail)\n self._BindArgument('--enable_task_running', enable_task_running)\n self._BindArgument('--high_replication', high_replication)\n self._BindArgument('--logs_path', logs_path)\n self._BindArgument('--prospective_search_path', prospective_search_path)\n self._BindArgument('--require_indexes', require_indexes)\n self._BindArgument('--show_mail_body', show_mail_body)\n self._BindArgument('--smtp_host', smtp_host)\n self._BindArgument('--smtp_password', smtp_password)\n self._BindArgument('--smtp_port', smtp_port)\n self._BindArgument('--smtp_user', smtp_user)\n self._BindArgument('--task_retry_seconds', task_retry_seconds)\n self._BindArgument('--trusted', trusted)\n self._BindArgument('--use_sqlite', use_sqlite)\n self._BindArgument('--default_gcs_bucket_name', default_gcs_bucket_name)", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n os.environ.get(\n 'KOMBI_PYTHON3_EXECUTABLE',\n 'python3'\n )\n )", "def addProcess(self, name, args, uid=None, gid=None, env={}):\n class SimpleProcessObject(object):\n\n def starting(self):\n pass\n\n def stopped(self):\n pass\n\n def getName(self):\n return name\n\n def getCommandLine(self):\n return args\n\n def getFileDescriptors(self):\n return []\n\n self.addProcessObject(SimpleProcessObject(), env, uid, gid)", "def __init__(self, user=None, java_home=None, 
compss_home=DEFAULT_COMPSS_HOME, target_base_dir=None,\n comm=DEFAULT_COMM, runcompss_opts=None, execution_envs=DEFAULT_EXECUTION_ENVS):\n\n # Either we receive a user from cfg or we load it from current user (always defined)\n if user is None:\n import getpass\n self.user = getpass.getuser()\n else:\n self.user = user\n\n # Either we receive a java_home or we load it from environment, if not defined we raise an exception\n if java_home is not None:\n self.java_home = java_home\n else:\n # Load from env\n self.java_home = os.getenv(\"JAVA_HOME\", None)\n if self.java_home is None:\n raise ConfigurationError(\n \"[ERROR] Undefined variable JAVA_HOME in both the configuration file and the environment\")\n\n # Store COMPSs_HOME (always defined because it has a default value)\n self.compss_home = compss_home\n\n # Define compss_log_dir\n user_home = os.path.expanduser(\"~\")\n self.compss_log_dir = os.path.join(user_home, DEFAULT_REL_COMPSS_LOG_DIR)\n\n # Either we receive the target_base_dir or we compute it from user home\n if target_base_dir is None:\n self.target_base_dir = os.path.join(user_home, DEFAULT_REL_TARGET_TESTS_DIR)\n else:\n self.target_base_dir = target_base_dir\n\n # Receive comm (always defined, has default value)\n self.comm = comm\n # Receive comm (can be None)\n self.runcompss_opts = runcompss_opts\n # Receive comm (always defined, has default value)\n self.execution_envs = execution_envs", "def __init__(self, args, shell, userns):\n self._waiting_steps = 0\n self._running_steps = 0\n self.shebang = (\"#!{0} \\n\".format(shell)).encode('utf8', 'replace')\n # Cell output\n self.out, self.err = None, None\n self._userns = userns", "def new_process() -> Process:\n return multiprocessing.Process()", "def __init__(self, senna_path, executable):\n self.senna_path = senna_path\n self.p = sp.Popen(['blabla', '-path', senna_path],\n executable=os.path.join(senna_path, executable),\n stdin=sp.PIPE,\n stdout=sp.PIPE)", "def __init__(self, ospl_home_bin = \"\", uri = \"\"):\r\n\r\n # OSPL uri:\r\n self.uri = uri\r\n # OSPL HOME binary folder:\r\n self.ospl_home_bin = ospl_home_bin\r\n\r\n # OSPL command process:\r\n self.process = Process(OSPL.command)\r\n # Point OSPL command to the OSPL HOME:\r\n self.reset_ospl_command()", "def __init__(self):\n self.read_input()\n self.update_binaries()", "def start_process(self, args):\n try:\n with open(os.devnull, 'w') as devnull:\n popenObj = subprocess.Popen(\n args, stdout=devnull, stderr=subprocess.PIPE, cwd=\"/tmp/\")\n popenObj.name = args\n return popenObj\n except Exception as e:\n self.logger.error(\n \"Cannot start process %s due to reason:%s\", args, e)\n raise e", "def __init__(self, execute_call):\n self._exe = execute_call", "def __init__(self, cmd, **kwargs):\n # Init method - should be subclassed!\n # \n # The subclass methods should look like this:\n # \n # def __init__(self, cmd=\"muscle\", **kwargs):\n # self.parameters = [...]\n # AbstractCommandline.__init__(self, cmd, **kwargs)\n # \n # i.e. There should have an optional argument \"cmd\" to set the location\n # of the executable (with a sensible default which should work if the\n # command is on the path on Unix), and keyword arguments. 
It should\n # then define a list of parameters, all objects derived from the base\n # class _AbstractParameter.\n # \n # The keyword arguments should be any valid parameter name, and will\n # be used to set the associated parameter.\n self.program_name = cmd\n try:\n parameters = self.parameters\n except AttributeError:\n raise AttributeError(\"Subclass should have defined self.parameters\")\n #Create properties for each parameter at run time\n aliases = set()\n for p in parameters:\n for name in p.names:\n if name in aliases:\n raise ValueError(\"Parameter alias %s multiply defined\" \\\n % name)\n aliases.add(name)\n name = p.names[-1]\n if _re_prop_name.match(name) is None:\n raise ValueError(\"Final parameter name %s cannot be used as \"\n \"an argument or property name in python\"\n % repr(name))\n if name in _reserved_names:\n raise ValueError(\"Final parameter name %s cannot be used as \"\n \"an argument or property name because it is \"\n \"a reserved word in python\" % repr(name))\n if name in _local_reserved_names:\n raise ValueError(\"Final parameter name %s cannot be used as \"\n \"an argument or property name due to the \"\n \"way the AbstractCommandline class works\"\n % repr(name))\n #Beware of binding-versus-assignment confusion issues\n def getter(name):\n return lambda x : x._get_parameter(name)\n def setter(name):\n return lambda x, value : x.set_parameter(name, value)\n def deleter(name):\n return lambda x : x._clear_parameter(name)\n doc = p.description\n if isinstance(p, _Switch):\n doc += \"\\n\\nThis property controls the addition of the %s \" \\\n \"switch, treat this property as a boolean.\" % p.names[0]\n else:\n doc += \"\\n\\nThis controls the addition of the %s parameter \" \\\n \"and its associated value. Set this property to the \" \\\n \"argument value required.\" % p.names[0]\n prop = property(getter(name), setter(name), deleter(name), doc)\n setattr(self.__class__, name, prop) #magic!\n for key, value in kwargs.iteritems():\n self.set_parameter(key, value)", "def __init__(self, args, shell, userns):\n super(SlurmMgr, self).__init__(args, shell, userns)\n\n from . 
import _DEFAULT_SLURM_OUTERR_FILE\n if _DEFAULT_SLURM_OUTERR_FILE is None:\n self._outerr_files = os.path.join(os.environ['HOME'], \"python-execute-slurm.%J\")\n else:\n self._outerr_files = _DEFAULT_SLURM_OUTERR_FILE\n _outerr_pardir = os.path.abspath(os.path.join(self._outerr_files, os.pardir))\n if not os.path.exists(_outerr_pardir):\n os.makedirs(_outerr_pardir)\n\n parser = MagicArgumentParser()\n parser.add_argument('--jobid', type=str,\n help='Variable to store Slurm Job Id')\n _args, cmd = parser.parse_known_args(args)\n self.cmd = self._wlbin + cmd + [\n '--output=' + self._outerr_files + '.out',\n '--error=' + self._outerr_files + '.err']\n self._is_started = False\n self._is_terminated = False\n self._args_jobid = _args.jobid\n\n # Build Popen instance\n try:\n self.p = Popen(self.cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE,)\n except OSError as e:\n if e.errno == errno.ENOENT:\n print(\"Couldn't find program: %r\" % self.cmd[0])\n return\n else:\n raise e", "def __init__(self, check):\n\n self.check = check\n self.proc = subprocess.Popen(\n shlex.split(check.command),\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n self.fd = self.proc.stdout.fileno()\n self.stream = self.proc.stdout\n self.output = ''\n\n self.started = time.time()\n self.ended = None", "def __init__(self, svc_name: str, svc_type: str, svc_port: str, svc_txt: str = None):\n\n\t\tpltfm = platform.system()\n\n\t\tif pltfm == 'Linux':\n\t\t\twhat = 'Linux'\n\t\t\targs = self.linux_args + [svc_name, svc_type, svc_port]\n\t\t\tif svc_txt != None:\n\t\t\t\targs = args + [svc_txt] # avahi-publish doesn't like empty txt input!\n\n\t\telif pltfm == 'Darwin':\n\t\t\twhat = 'macOS'\n\t\t\targs = self.macos_args + [svc_name, svc_type, \"local\", svc_port]\n\t\t\tif svc_txt != None:\n\t\t\t\targs = args + [svc_txt] # just to keep consistent with Linux path\n\n\t\telse:\n\t\t\tprint(f'Unknown platform \"{pltfm}\"')\n\t\t\tsys.exit(-1)\n\n\t\tprint(f'Platform \"{pltfm}\" assumed to be {what}, using {args} ...')\n\n\t\ttry:\n\t\t\tself.process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\t\texcept Exception as e:\n\t\t\tprint(f'Cannot invoke registration command: {e}')\n\t\t\tsys.exit(-1)", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def __init__(self, serverUrl, databaseName, changeHandlers=None, changeOptions=None, *args, **kwargs):\n Process.__init__(self, None, None, \"learningRegistryChangeMonitor\", args, kwargs)\n self._database = couchdb.Server(serverUrl)[databaseName]\n self._callerThread = None\n self._addHandlerQueue = Queue()\n self._removeHandlerQueue = Queue()\n self._initOptions(changeOptions)\n self._initLastChangeSequence()\n self._initChangeHandlers(changeHandlers)", "def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )", "def __init__(self):\n rospy.logdebug(\"Start ParrotDroneEnv INIT...\")\n\n #Spawn Parrot AR Drone through launch file\n self.ros_pkg_name=\"drone_construct\"\n self.launch_file_name=\"put_drone_in_world.launch\"\n \n super(ParrotDroneEnv, self).__init__(\n ros_pkg_name=self.ros_pkg_name,\n launch_file=self.launch_file_name,\n start_init_physics_parameters=True,\n reset_world_or_sim='WORLD')\n\n rospy.logdebug(\"Finished ParrotDroneEnv INIT...\")", "def __init__(__self__, *,\n type: pulumi.Input[str],\n jvm_options: Optional[pulumi.Input[str]] = None,\n relative_path: Optional[pulumi.Input[str]] = None,\n runtime_version: Optional[pulumi.Input[str]] = 
None,\n version: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"type\", 'Jar')\n if jvm_options is not None:\n pulumi.set(__self__, \"jvm_options\", jvm_options)\n if relative_path is not None:\n pulumi.set(__self__, \"relative_path\", relative_path)\n if runtime_version is not None:\n pulumi.set(__self__, \"runtime_version\", runtime_version)\n if version is not None:\n pulumi.set(__self__, \"version\", version)", "def __init__(self, name, args):\n self._proc = None\n self._args = [f\"/{name}\"]\n self._args.extend(args)", "def __init__(self, argv, stdin, stdout, stderr):\n self._argv = argv\n self._stdin = stdin\n self._stdout = stdout\n self._stderr = stderr", "def __init__(self,\n input_queue: JoinableQueue,\n output_queue: JoinableQueue,\n error_queue: JoinableQueue,\n slack_queue: 'SlackBot.SlackQueue',\n logging_queue: JoinableQueue,\n process_job: Callable[[Type['Task.Task']], Type['Task.Task']],\n name: str =\"PipelineManager\",\n num_processes: int = 1,\n timeout_duration: int = 1) -> None:\n\n self.name = name\n #An attempt to idiot-proof the PipelineManager by instantiating a JoinableQueue() if one didn't exist already.\n self.input_queue = input_queue if input_queue else JoinableQueue()\n self.output_queue = output_queue if output_queue else JoinableQueue()\n self.error_queue = error_queue if error_queue else JoinableQueue()\n self.slack_queue = slack_queue\n self.logging_queue = logging_queue\n self.num_processes = num_processes\n self.process_job = process_job\n self.timeout_duration = timeout_duration\n #A list of active processes comprised of Process objects\n self.process_list: List[Process] = []\n #An internal restart flag (used when all processes managed die)\n self.restart_required = False\n self.logger = logging.getLogger(self.name)\n self.logger.setLevel(logging.DEBUG)", "def __init__(self, options, positionals):\n\n print \"* Starting up LOPHI Master Process\"\n\n self.COMMANDS = {G.CTRL_CMD_START: self.command_start,\n G.CTRL_CMD_LIST: self.command_list,\n G.CTRL_CMD_PAUSE: self.command_abstract,\n G.CTRL_CMD_UNPAUSE: self.command_abstract,\n G.CTRL_CMD_SPLASH: self.command_splash,\n G.CTRL_CMD_UPDATE_HW: self.command_update_hw,\n G.CTRL_CMD_STOP: self.command_abstract,\n G.CTRL_CMD_DIE: self.command_abstract,\n G.CTRL_CMD_ATTACH: self.command_abstract,\n G.CTRL_CMD_EXECUTE: self.command_abstract}\n\n self.MSG_TYPES = set([G.CTRL_TYPE, G.REG_TYPE])\n\n # response header\n self.RESP_HEADER = \"[LOPHI Master] \"\n\n logger.debug(\"Importing config files...\")\n\n # Save our config file\n self.master_config_file = options.config_file\n\n # Save our config file\n self.analysis_directory = options.analysis_directory\n\n # Read our config into an internal structure \n self.config_list = Configs.import_from_config(self.master_config_file,\n \"controller\")\n\n # Read our analysis scripts into an internal structure\n self.update_analysis()\n\n # Connect to our database\n self.DB_analysis = DB.DatastoreAnalysis(options.services_host)\n\n # Set our RabbitMQ host\n self.amqp_host = options.services_host", "def __init__(self, allowed_processes=None, crypto_miner=None, custom_feed=None, denied_processes=None, detect_compiler_generated_binary=None, encrypted_binaries=None, execution_flow_hijack=None, intelligence_feed=None, reverse_shell=None, service_unknown_origin_binary=None, skip_ssh_tracking=None, suspicious_elf_headers=None, temp_fs_proc=None, user_unknown_origin_binary=None, web_shell=None, wild_fire_analysis=None, local_vars_configuration=None): # noqa: 
E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration.get_default_copy()\n self.local_vars_configuration = local_vars_configuration\n\n self._allowed_processes = None\n self._crypto_miner = None\n self._custom_feed = None\n self._denied_processes = None\n self._detect_compiler_generated_binary = None\n self._encrypted_binaries = None\n self._execution_flow_hijack = None\n self._intelligence_feed = None\n self._reverse_shell = None\n self._service_unknown_origin_binary = None\n self._skip_ssh_tracking = None\n self._suspicious_elf_headers = None\n self._temp_fs_proc = None\n self._user_unknown_origin_binary = None\n self._web_shell = None\n self._wild_fire_analysis = None\n self.discriminator = None\n\n if allowed_processes is not None:\n self.allowed_processes = allowed_processes\n if crypto_miner is not None:\n self.crypto_miner = crypto_miner\n if custom_feed is not None:\n self.custom_feed = custom_feed\n if denied_processes is not None:\n self.denied_processes = denied_processes\n if detect_compiler_generated_binary is not None:\n self.detect_compiler_generated_binary = detect_compiler_generated_binary\n if encrypted_binaries is not None:\n self.encrypted_binaries = encrypted_binaries\n if execution_flow_hijack is not None:\n self.execution_flow_hijack = execution_flow_hijack\n if intelligence_feed is not None:\n self.intelligence_feed = intelligence_feed\n if reverse_shell is not None:\n self.reverse_shell = reverse_shell\n if service_unknown_origin_binary is not None:\n self.service_unknown_origin_binary = service_unknown_origin_binary\n if skip_ssh_tracking is not None:\n self.skip_ssh_tracking = skip_ssh_tracking\n if suspicious_elf_headers is not None:\n self.suspicious_elf_headers = suspicious_elf_headers\n if temp_fs_proc is not None:\n self.temp_fs_proc = temp_fs_proc\n if user_unknown_origin_binary is not None:\n self.user_unknown_origin_binary = user_unknown_origin_binary\n if web_shell is not None:\n self.web_shell = web_shell\n if wild_fire_analysis is not None:\n self.wild_fire_analysis = wild_fire_analysis", "def __init__(self, WORKDIR, JOBNAME, debug):\n #read in parameters\n self.workdir = WORKDIR\n self.jobname = JOBNAME", "def create_process( # type: ignore[override]\n self,\n python_setup: PythonSetup,\n subprocess_encoding_environment: SubprocessEncodingEnvironment,\n pex_build_environment: PexBuildEnvironment,\n *,\n pex_args: Iterable[str],\n description: str,\n input_files: Optional[Digest] = None,\n env: Optional[Mapping[str, str]] = None,\n **kwargs: Any,\n ) -> Process:\n\n env = dict(env) if env else {}\n env.update(**pex_build_environment.invocation_environment_dict,)\n\n return super().create_process(\n python_setup=python_setup,\n subprocess_encoding_environment=subprocess_encoding_environment,\n pex_path=self.executable,\n pex_args=pex_args,\n description=description,\n input_files=input_files or self.digest,\n env=env,\n **kwargs,\n )", "def __init__(self, name, jar, main_class=None,\r\n action_on_failure='TERMINATE_JOB_FLOW', step_args=None):\r\n self.name = name\r\n self._jar = jar\r\n self._main_class = main_class\r\n self.action_on_failure = action_on_failure\r\n\r\n if isinstance(step_args, basestring):\r\n step_args = [step_args]\r\n\r\n self.step_args = step_args", "def __init__(self, queue, video_file, export_interval, export_directory):\n Process.__init__(self)\n self.queue = queue\n self.video_file = video_file\n self.export_interval = export_interval\n self.export_directory = export_directory\n 
self.current_state = None\n self.constants = CollisionConstants()\n signal.signal(signal.SIGINT, self.signal_handler)", "def __init__(self):#, username):\n# self.username = username\n self.pid = os.getpid()", "def __init__(self):\n\n self.arg = None\n self.output = None", "def __init__(self, logfile):\r\n super(PopenWrapper, self).__init__()\r\n self.logfile = logfile", "def __init__(self, do_fork=True):\n\n self.msg = \"Test msg %d\"\n self.do_fork = do_fork\n try:\n # Store the Fork PID\n with open(\"/tmp/daemon.pids\", \"w\") as f:\n self.pid = os.fork()\n f.write(f\"{self.pid}|{os.getpid()}\\n\")\n\n if self.pid == 0:\n print(\"PID: %d\" % self.pid)\n if not do_fork:\n os._exit(0)\n\n except OSError as error:\n print(\"Unable to fork. Error: %d (%s)\" % (error.errno, error.strerror))\n os._exit(1)\n\n self.doTask()", "def __init__(\n self,\n output_dir=\"output\",\n resources_dir=\"resources\",\n parallelize=False,\n processes=os.cpu_count(),\n ):\n self._output_dir = output_dir\n self._resources_dir = resources_dir\n self._resources = Resources(resources_dir=resources_dir)\n self._parallelizer = Parallelizer(parallelize=parallelize, processes=processes)", "def __init__(\n self, env, link,\n transmitter_port, receiver_port):\n self.env = env\n self.link = link\n self._transmitter_port = transmitter_port\n self._receiver_port = receiver_port\n env.process(self.run())", "def __init__(self, shared, subbed, G, solver, istart, cutoff):\n\n # Extract the data and save to self\n self.shared = shared\n self.bool_list, self.north, self.east, self.up = subbed[:4]\n self.wn, self.we, self.wu = subbed[4:7]\n self.penn, self.pene, self.penu = subbed[7:]\n self.G, self.solver, self.istart = G, solver, istart\n self.cutoff = cutoff\n # Init the parent Process class\n Process.__init__(self)", "def __init__(self,\n root: Path = None,\n resources_dir: Path = None,\n slave_configuration_path : Path = None,\n binaries_dir : Path = None,\n wrapper_win64 : Path = None,\n wrapper_linux64: Path = None,\n main_script_path : Path = None,\n model_description: Path = None,\n model_description_path : Path = None,\n main_script: Path = None,\n main_class : Path = None,\n pyfmu_dir : Path = None\n ):\n self.model_description = model_description\n\n self.main_script = main_script\n self.main_class = main_class\n self.slave_configuration = None\n\n # paths\n self.root = root\n self.resources_dir = resources_dir\n self.slave_configuration_path = slave_configuration_path\n self.main_script_path = main_script_path\n self.model_description_path = model_description_path\n self.binaries_dir = binaries_dir\n self.wrapper_win64 = wrapper_win64\n self.wrapper_linux64 = wrapper_linux64\n self.pyfmu_dir = pyfmu_dir", "def __init__(self, args):\n self.args = args\n self.sender, receiver = mp.Pipe()\n self.plotter = RealPlotter()\n self.plot_process = mp.Process(\n target=self.plotter, args=(receiver,), daemon=True)\n self.plot_process.start()", "def _spawn_agent_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n if not isinstance(process_instance, ResourceAgent) and not isinstance(process_instance, SimpleResourceAgent):\n raise ContainerConfigError(\"Agent process must extend ResourceAgent\")\n listeners = []\n\n # Set the resource ID if we get it through the config\n resource_id = get_safe(process_instance.CFG, \"agent.resource_id\")\n if resource_id:\n process_instance.resource_id = resource_id\n\n alistener = 
self._create_listening_endpoint(node=self.container.node,\n from_name=resource_id,\n process=process_instance)\n\n listeners.append(alistener)\n\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n listeners.append(rsvc)\n\n # cleanup method to delete process/agent queue (@TODO: leaks a bit here - should use XOs)\n def agent_cleanup(x):\n self._cleanup_method(process_instance.id, rsvc)\n if resource_id:\n self._cleanup_method(resource_id, alistener)\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=listeners,\n proc_name=process_instance._proc_name,\n cleanup_method=agent_cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_agent_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n # Now call the on_init of the agent.\n self._process_init(process_instance)\n\n if not process_instance.resource_id:\n log.warn(\"New agent pid=%s has no resource_id set\" % process_id)\n\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n if not process_instance.resource_id:\n log.warn(\"Agent process id=%s does not define resource_id!!\" % process_instance.id)\n\n return process_instance", "def __init__(self, cfg, commands, audio_record, audio_play, close_event):\n\n multiprocessing.Process.__init__(self)\n\n self.cfg = cfg\n self.acc = None\n self.acc_cb = None\n self.call = None\n\n self.commands = commands\n self.local_commands = deque()\n\n self.audio_record = audio_record\n self.audio_recording = False\n\n self.audio_play = audio_play\n self.audio_playing = False\n self.local_audio_play = deque()\n\n self.last_frame_id = 1\n self.message_queue = []\n\n self.close_event = close_event\n\n self.black_list = defaultdict(int)", "def __init__(self, configpath):\n super().__init__('master process')\n self._configpath = configpath\n self._children = []", "def __init__(self):\n self.write_queue = Manager().Queue()\n\n BaseManager.register('Arduino',Arduino)\n BaseManager.register('Algorithm',Algorithm)\n BaseManager.register('Android',Android)\n BaseManager.register('ImageCV', ImageCV)\n manager = BaseManager()\n manager.start()\n shared_ard = manager.Arduino()\n shared_alg = manager.Algorithm()\n shared_and = manager.Android()\n shared_icv = manager.ImageCV()\n \n p1 = Process(target=self.read_algorithm, args=(shared_alg, shared_icv))\n p1.start()\n p2 = Process(target=self.read_arduino, args=[shared_ard])\n p2.start()\n p3 = Process(target=self.read_android, args=[shared_and])\n p3.start()\n p4 = Process(target=self.read_imagecv, args=[shared_icv])\n p4.start()\n p5 = Process(target=self.write_target, args=(shared_ard, shared_alg, shared_and, shared_icv))\n p5.start()\n p5.join()", "def __init__(self, process_name, target_function, tasks):\n self.pipe_start, self.pipe_end = multiprocessing.Pipe()\n printnflush (\"Process started: %s\"%process_name)\n self.process = multiprocessing.Process(group=None,\n target=target_function,\n name=process_name,\n args = (process_name, tasks, self.pipe_end))\n self.busy = False", "def __init__(self, *args, **kwargs):\n super(ParentProcessAwareSyncManager, 
self).__init__(*args, **kwargs)\n # the instance of the ProcessWatchDog class which is responsible for the detection of the killed parent process.\n self._watchdog = None\n\n # time which is given to shutdown everything gracefully before the SIGKILL is sent.\n self._time_before_kill = 10", "def __init__(self, command_list, seed_index=None, stdin=None, ignore_output=None):\n if not isinstance(command_list, list):\n command_list = [command_list]\n command_list = [str(x) for x in command_list]\n # Set up messages to ignore\n if ignore_output is not None:\n self.__ignore_output = ignore_output\n # Usual case, where no open file handle is provided\n if stdin is None:\n self.__process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n # Write seed to stdin\n if seed_index is not None:\n self.__output_prefix += \"Process #{}: \".format(seed_index)\n self.__process.stdin.write(str(seed_index))\n self.__process.stdin.close()\n with open(\"pwgseeds.dat\", \"rb\") as seed_file:\n random_seed_list = seed_file.read().splitlines()\n self.log(\"Providing random seed: {}\".format(random_seed_list[seed_index - 1]))\n # Using an open file handle to provide input to stdin: remember to close this later\n else:\n self.__process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stdin=stdin, stderr=subprocess.PIPE)\n # Setup non-blocking stream readers for stdout and stderr\n self.__stdout = NonBlockingStreamReader(self.__process.stdout)\n self.__stderr = NonBlockingStreamReader(self.__process.stderr)" ]
[ "0.6975941", "0.6958235", "0.67394876", "0.6490618", "0.6321262", "0.6161817", "0.612476", "0.6118696", "0.6096931", "0.6078772", "0.60693926", "0.60651165", "0.60423166", "0.60343724", "0.6023563", "0.600156", "0.5997967", "0.5996555", "0.5996109", "0.5992397", "0.59232205", "0.5911356", "0.5887123", "0.58784014", "0.58750504", "0.5862586", "0.5821235", "0.58133376", "0.580679", "0.5776895", "0.5772634", "0.5767489", "0.5766778", "0.5746678", "0.5743888", "0.57207537", "0.5697409", "0.56906766", "0.5669662", "0.56688017", "0.5661704", "0.56599146", "0.56578314", "0.5643581", "0.5626292", "0.56133413", "0.55959857", "0.5583519", "0.5575963", "0.5573424", "0.55565506", "0.55485016", "0.5539343", "0.55319566", "0.55318147", "0.55256504", "0.552013", "0.55161864", "0.54897183", "0.5487508", "0.5475878", "0.54602474", "0.5458723", "0.5447629", "0.5441156", "0.54286337", "0.5428051", "0.5424346", "0.54215777", "0.5420897", "0.5418543", "0.54173446", "0.5415367", "0.5402114", "0.54004645", "0.53956383", "0.53883564", "0.53766406", "0.5376354", "0.5370152", "0.5369299", "0.53685945", "0.536283", "0.5357425", "0.5348491", "0.5336658", "0.53350246", "0.53304887", "0.53295434", "0.5319991", "0.531698", "0.531043", "0.5309474", "0.5308306", "0.5303908", "0.5297841", "0.5284864", "0.5283084", "0.52805424", "0.5269254" ]
0.8258955
0
we will do a bubble sort on the list and then take the 2nd-to-last element
def second_largest(number_list):
    # Bubble sort: repeatedly swap adjacent out-of-order elements until the
    # list is in ascending order.
    for i in range(len(number_list)):
        for j in range(len(number_list) - 1 - i):
            if number_list[j] > number_list[j+1]:
                number_list[j + 1], number_list[j] = number_list[j], number_list[j+1]
    # The 2nd-to-last element of the sorted list is the second largest.
    return number_list[-2]
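A quick sanity check for the function above (a minimal sketch; the sample list is made up for illustration):

numbers = [3, 7, 1, 9, 4]
print(second_largest(numbers))  # prints 7: after the bubble sort the list is [1, 3, 4, 7, 9]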
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bubbleSort(list):", "def bubble_sort(list):\n for i in range(1, len(list) - 1):\n for j in range(len(list) - 1, i-1, -1):\n if list[j - 1] > list[j]:\n x = list[j]\n list[j] = list[j - 1]\n list[j - 1] = x\n return list", "def bubbleSort(lst):\n for i in range(len(lst)):\n for j in range(len(lst)-i-1):\n if lst[j]>lst[j+1]:\n lst[j], lst[j+1], = lst[j+1], lst[j]\n print(lst)", "def bubble_sort(L):\n end = len(L)-1\n\n while end !=0:\n for i in range(end):\n if L[i] > L[i+1]:\n L[i], L[i+1] = L[i+1], L[i]\n \n \n end = end -1", "def bubble_sort(mylist):\n sorted = False\n while not sorted:\n sorted = True\n for i in range(1, len(mylist)):\n if mylist[i - 1] > mylist[i]:\n sorted = False\n mylist[i - 1], mylist[i] = mylist[i], mylist[i - 1]\n return mylist", "def bubble_sort(a_list):\n for item in reversed(range(len(a_list))):\n for i in range(item):\n if a_list[i] > a_list[i + 1]:\n a_list[i], a_list[i + 1] = a_list[i + 1], a_list[i]\n return a_list", "def bubble_sort(input_list):\n \n length = len(input_list)\n \n for i in range(length):\n for j in range(length-i-1):\n if input_list[j] > input_list[j+1]:\n input_list[j], input_list[j+1] = input_list[j+1], input_list[j]\n \n return input_list", "def bubbleSort( listToSort ):\n for i in range( len(listToSort), 0, -1 ):\n for j in range( 0, i-1 ):\n if listToSort[j] > listToSort[j+1]:\n tmp = listToSort[j]\n listToSort[j] = listToSort[j+1]\n listToSort[j+1] = tmp", "def bubble_sort(first):\n # iterate len(lst) times\n for i in range(len(first)):\n\n # integrate [len(lst) - i - 1] times\n for j in range(len(first) - i - 1):\n\n # sort two number if not sorted\n if first[j] > first[j + 1]:\n # swap element at j with element at j + 1\n # and element ad j + 1 with element j\n first[j], first[j + 1] = first[j + 1], first[j]", "def bubble_sort(lst):\n\n def swap(i,j):\n lst[i], lst[j] = lst[j], lst[i]\n\n n = 0\n while n <= len(lst) - 1:\n for i in range(len(lst)-1-n):\n if lst[i] >= lst[i+1]:\n swap(i,i+1)\n n+=1", "def bubble_sort(lst: list) -> None:\n n = len(lst)\n if n == 0 or n == 1:\n return\n for boundary in range(n, 1, -1):\n swapped = False\n for i in range(1, boundary):\n if lst[i - 1] > lst[i]:\n swap(lst, i - 1, i)\n swapped = True\n if not swapped: # if list is now sorted\n return", "def buble_sort(lst):\n lst_sorted = copy.copy(lst)\n for i in range(len(lst_sorted)):\n for j in range(len(lst_sorted)):\n if j == len(lst_sorted) - 1:\n continue\n if lst_sorted[j][1] > lst_sorted[j + 1][1]:\n lst_sorted[j], lst_sorted[j+1] = lst_sorted[j+1], lst_sorted[j]\n\n return lst_sorted", "def bubble_sort(items):\n for num in range(len(items)-1,0,-1):\n for j in range(num):\n if items[j]>items[j+1]:\n temp = items[j]\n items[j] = items[j+1]\n items[j+1] = temp\n return items", "def BubbleSort(ulist):\n done = 0 #This variable is used to break the loop when sorting is done\n while not done:\n done = 1\n for i in range(len(ulist) - 1):\n if ulist[i] > ulist[i+1]:\n ulist[i], ulist[i+1] = ulist[i+1], ulist[i]\n done = 0", "def bubble_sort(integer_list):\n for passnum in range(len(integer_list), 1, -1):\n for i in range(passnum-1):\n if integer_list[i] > integer_list[i+1]:\n integer_list[i], integer_list[i+1] = integer_list[i+1], integer_list[i]\n return integer_list", "def bubble_sort(arr):\n temp = None\n for x in range(0, len(arr)-1):\n for y in range(0, len(arr)-1):\n if arr[y] > arr[y+1]:\n temp = arr[y]\n arr[y] = arr[y+1]\n arr[y+1] = temp \n return arr", "def bubble_sort(arr: List[int]) -> List[int]:\n sorted = False\n while not 
sorted:\n sorted = True\n for i in range(1, len(arr)):\n if arr[i-1] > arr[i]:\n arr[i], arr[i-1] = arr[i-1], arr[i]\n sorted = False\n return arr", "def bubbleSort(list,i):\n\n if i == 1:\n return list\n\n else:\n for k in range(i - 1):\n if list[k] > list[k+1]:\n list[k], list[k+1] = list[k+1], list[k]\n\n bubbleSort(list,i-1)", "def bubble_sort(a):\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a", "def bubble_sort(a):\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a", "def buble_sort(l):\r\n for i in range(len(l)):\r\n for j in range(i+1, len(l)):\r\n if (l[j-1]>l[j]):\r\n l[j-1], l[j] = l[j], l[j-1]", "def bubble_sort(data):\n the_list = list(data)\n\n length = len(the_list)\n \"\"\" Iterate from the start to the back,\n then go to back - 1 \"\"\"\n while length >= 1:\n innerloop(the_list)\n length -= 1\n\n return the_list", "def improved_bubble_sort(data_list):\n for passnum in range(len(data_list) - 1, 0, -1):\n is_sorted = True\n for idx in range(passnum):\n if data_list[idx] > data_list[idx + 1]:\n temp = data_list[idx]\n data_list[idx] = data_list[idx + 1]\n data_list[idx + 1] = temp\n is_sorted = False\n if is_sorted:\n return", "def bubblesort(given_list):\n completely_sorted = False\n while not completely_sorted:\n update_done = False\n for i in range(1, len(given_list)):\n previous = given_list[i-1]\n if previous > given_list[i]:\n given_list[i-1] = given_list[i]\n given_list[i] = previous\n update_done = True\n if not update_done:\n completely_sorted = True\n return given_list", "def bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items) - 1 - i):\n if items[j] > items[j + 1]:\n items[j], items[j + 1] = items[j + 1], items[j]\n return items", "def bubble_sort(data_list_or_tuple):\n data_list = list(data_list_or_tuple)\n for count, _ in enumerate(data_list, 1):\n for x in range(len(data_list)-count):\n if data_list[x] > data_list[x+1]:\n data_list[x], data_list[x+1] = data_list[x+1], data_list[x]\n return data_list", "def bubble_sort(arr:Sequence[List]) -> Sequence[List]:\n n = len(arr)\n for i in range(n-1):\n for j in range(n-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr", "def bubble_sort(self, data):\n for i in range(len(data)-1, 0, -1):\n for j in range(i):\n if data[j] > data[j+1]:\n tmp = data[j]\n data[j] = data[j+1]\n data[j+1] = tmp\n print \"pass\", i, data", "def bubblesort(lst_bubble):\n n = len(lst_bubble)\n cond = 0\n assign = 0\n for i in range(n):\n for j in range(1,n-i):\n cond +=1\n if lst_bubble[j-1] > lst_bubble[j]:\n lst_bubble[j],lst_bubble[j-1] = swap(lst_bubble[j],lst_bubble[j-1])\n assign +=3\n return (cond, assign)", "def bubble_sort(lst):\n\n swaps_made = 0\n\n for i in range(len(lst)):\n made_swap = False\n for j in range(len(lst)-1-i):\n if lst[j] > lst[j+1]:\n lst[j], lst[j+1] = lst[j+1], lst[j]\n made_swap = True\n swaps_made += 1\n\n if not made_swap:\n break\n\n return lst #or can return # swaps made if that's what we're looing for", "def bubble_sort(itr):\n arr = list(copy.copy(itr))\n is_sorted = True\n n = len(arr)\n while n > 1:\n for i in range(1, n):\n if arr[i] < arr[i - 1]:\n arr[i], arr[i - 1] = arr[i - 1], arr[i]\n is_sorted = False\n if is_sorted: break\n is_sorted = True\n n -= 1\n return arr", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n for i in range(1, len(lst)): #loops through each element 
starting at the second one\n for j in range(i, 0, -1): #loops through each element coming before i starting at i and going backwards\n if compare(lst[j], lst[j-1]) < 0: #checks to see if the previous element is smaller than the current (by saying <0 we keep the sort stable as well)\n lst[j], lst[j-1] = lst[j-1], lst[j] #if they are, we switch them\n else:\n break #if they are not, we know that the element is in its proper place\n return lst", "def bubbleSort(sortList):\r\n \r\n for i in range(len(sortList)):\r\n for j in range(len(sortList)-1-i):\r\n if itemAmount[ sortList[j] ] > itemAmount[ sortList[j + 1] ]:\r\n sortList[j], sortList[j+1] = sortList[j+1], sortList[j]", "def short_bubble_sort(num_list):\n exchange = True\n passnum = len(num_list) - 1\n\n while passnum > 0 and exchange:\n exchange = False\n for i in range(passnum):\n # Exchanges items\n if num_list[i] > num_list[i + 1]:\n temp = num_list[i]\n num_list[i] = num_list[i + 1]\n num_list[i + 1] = temp\n exchange = True\n passnum -= 1", "def bubble_sort(mylist):\n n = len(mylist)\n swapped = True\n while swapped:\n swapped = False\n for i in range(1, n):\n if mylist[i-1] > mylist[i]:\n _swap(mylist, i-1, i)\n swapped = True\n n -= 1", "def bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items)-1-i):\n if items[j] > items[j+1]:\n items[j], items[j+1] = items[j+1], items[j] # Swap!", "def burbuja(lista:list):\n vector = lista\n for i in range(0, len(vector)-1):\n for j in range(0, len(vector)-1):\n if vector[j] > vector[j+1]:\n tmp = vector[j+1]\n vector[j+1] = vector[j]\n vector[j] = tmp\n return vector", "def bubble_sort(l):\n # Raise value\n if not isinstance(l, list):\n raise TypeError(\"Not a list\")\n \n # Initialize variables to count\n r = c = w = 0\n lenL = len(l)\n swapped = True \n\n # only if there isn't any swap will the loop stop \n while swapped:\n swapped = False \n\n for i in range(1, lenL):\n r += 2\n c += 1 \n if l[i - 1] > l[i]:\n # Swap the elements\n l[i - 1], l[i] = l[i], l[i - 1]\n w += 2\n swapped = True # loop again \n lenL -= 1 \n \n return c, r, w", "def sorting(my_list):\n for indx in range(1,len(my_list)):\n i=indx\n while i>0:\n if my_list[i]<my_list[i-1]:\n temp=my_list[i-1]\n my_list[i-1]=my_list[i]\n my_list[i]=temp\n i=i-1\n return my_list", "def bubble_sort(alist):\n potato = True\n while potato:\n potato = False\n for idx in range(len(alist) - 1):\n if alist[idx] > alist[idx + 1]:\n temp = alist[idx]\n alist[idx] = alist[idx + 1]\n alist[idx + 1] = temp\n potato = True\n return alist", "def sort_2(l):\n l.reverse()", "def bubble_sort(arr):\n n = len(arr)\n # Traverse through all array elements\n for i in range(n):\n # Last i elements are already in place\n for j in range(0, n-i-1):\n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if arr[j] > arr[j+1] :\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr", "def short_bubble_sort(integer_list):\n exchanged = True\n for passnum in range(len(integer_list), 1, -1):\n exchanged = False\n for i in range(passnum-1):\n if integer_list[i] > integer_list[i+1]:\n exchanged = True\n integer_list[i], integer_list[i+1] = integer_list[i+1], integer_list[i]\n if not exchanged:\n break\n return integer_list", "def bubble_sort(data):\n sorted_data = list(data)\n\n for i in range(len(sorted_data) - 1):\n for j in range(len(sorted_data) - 1 - i):\n if sorted_data[j] > sorted_data[j + 1]:\n sorted_data[j], sorted_data[j + 1] =\\\n sorted_data[j + 1], sorted_data[j]\n return 
sorted_data", "def bubble_sort_smart(array: list):\n size = len(array)\n\n for i in range(size):\n for j in range(size - i - 1):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def bubblesort(a):\n\n while True:\n swap = 0\n for i in range(1,len(a)):\n if a[i] < a[i-1]:\n a[i], a[i-1] = a[i-1], a[i]\n swap = 1\n \n if not swap:\n break", "def wiggle_sort(nums):\n\n for i in range(len(nums)):\n if (i % 2 == 1) == (nums[i - 1] > nums[i]):\n nums[i - 1], nums[i] = nums[i], nums[i - 1]", "def bubble_sort(array: list):\n size = len(array)\n\n for i in range(size):\n for j in range(size - 1):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]", "def bubble_sort(items):\n # TODO: Repeat until all items are in sorted order\n # TODO: Swap adjacent items that are out of order\n for x in range(len(items)-1):\n if items[x]>items[x+1]:\n temp = items[x]\n items[x] = items[x+1]\n items[x+1] = temp", "def test_sort_reversed():\n sorted_data = [5, 4, 3, 2, 1]\n sorted_list = bubble_sort(sorted_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def bubble_sort(array):\n final_position = len(array) - 1\n\n while final_position > 0:\n for index in range(final_position):\n if array[index] > array[index + 1]:\n array[index], array[index + 1] = array[index + 1], array[index]\n\n final_position -= 1\n\n return array", "def bubble_final_position(array):\n swap_point = len(array)\n while swap_point:\n new_swap = 0\n for i in range(1, swap_point):\n if array[i-1] > array[i]:\n array[i-1], array[i] = array[i], array[i-1]\n new_swap = i\n swap_point = new_swap", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def bubble_sort(items):\n # Repeat until all items are in sorted order\n # Swap adjacent items that are out of order\n # loop through list\n # TODO: Running time: ??? Why and under what conditions?\n # TODO: Memory usage: ??? 
Why and under what conditions?\"\"\"\n while not is_sorted(items):\n for i in range(len(items) - 1):\n # Loop backwards avoiding the already sorted numbers\n for j in range(len(items) - 1 - i):\n # if left item is bigger than the right\n if items[j] > items[j + 1]:\n # Swap left and right \n items[j], items[j + 1] = items[j + 1], items[j]\n return items", "def test_bubblesort_sorts_list():\n from bubblesort import bubblesort\n unsorted_list = [6, 4, 7, 9, 0, 2]\n assert bubblesort(unsorted_list) == [0, 2, 4, 6, 7, 9]", "def bubbleSort(unsortedValues: list) -> None:\n \n done: bool = False # initialize to false \n sortedCount: int = 0\n\n while not done: \n done = True\n index: int = 0\n\n while index < len(unsortedValues) - 1 - sortedCount:\n if unsortedValues[index] > unsortedValues[index + 1]:\n swap(unsortedValues, index, index + 1)\n done = False\n index += 1\n sortedCount += 1", "def innerloop(the_list):\n for index in range(len(the_list)-1):\n if the_list[index] > the_list[index+1]:\n smaller = the_list[index+1]\n bigger = the_list[index]\n the_list[index] = smaller\n the_list[index+1] = bigger\n\n return tuple(the_list)", "def bubble_sort(items):\n out = items.copy() # in place protection on items\n for i in range(len(out)):\n for j in range(len(out)-1-i):\n if out[j] > out[j+1]:\n out[j], out[j+1] = out[j+1], out[j] # Swap!\n\n return out", "def bubble_sort(A: list):\n N = len(A)\n list_is_sorted = False\n bypass = 1\n while not list_is_sorted:\n list_is_sorted = True\n for k in range(N - bypass):\n if A[k] > A[k+1]:\n A[k], A[k+1] = A[k+1], A[k]\n list_is_sorted = False\n bypass += 1", "def bubble_sort(items):\n # Repeat until all items are in sorted order\n # Swap adjacent items that are out of order\n current = 0\n right = 1\n while not is_sorted(items):\n if current == len(items) - 1:\n current = 0\n right = 1\n\n elif items[current] > items[right]:\n items[current], items[right] = items[right], items[current]\n \n else:\n current += 1\n right += 1", "def nth_largest2(a_list, n):\n a_list.sort()\n new_list = a_list[::-1]\n return new_list[n-1]", "def bubble_sort_smarter(array: list):\n size = len(array)\n\n while size > 0:\n idx = 0\n for j in range(size):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]\n idx = j + 1\n\n size = idx", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n temp = lst\n switched = True\n while switched:\n switched = False\n for i in range(len(temp) - 1):\n if compare(temp[i], temp[i + 1]) == 1:\n temp[i], temp[i + 1] = temp[i + 1], temp[i]\n switched = True\n\n return temp", "def reverse_sort(list_to_sort: List) -> List:\n if len(list_to_sort) == 1:\n return list_to_sort\n list_to_sort = [e for e in list_to_sort] # to avoid sorting in place, comment this to change original list\n for j in range(len(list_to_sort) - 2, -1, -1):\n sorted_element = list_to_sort[j]\n # print('sorted element:', sorted_element)\n i = j + 1\n while i < len(list_to_sort) and list_to_sort[i] > sorted_element:\n list_to_sort[i - 1] = list_to_sort[i]\n i += 1\n list_to_sort[i - 1] = sorted_element\n # print('list after insertion:', list_to_sort)\n return list_to_sort", "def bubble_sort_modificado(a):\n N = len(a)\n n = 0\n b = False\n while (n != N) and (not b):\n k = N - 1\n b = True\n while k != n:\n if a[k-1] > a[k]:\n b = False\n a[k-1], a[k] = a[k], a[k-1]\n k -= 1\n n += 1", "def bubbleSort(sequence):\n n = len(sequence)\n # Perform n-1 bubble operations on 
the sequence\n for i in range(n - 1):\n # Bubble the largest item to the end.\n for j in range(n - i - 1):\n if sequence[j] > sequence[j+1]:\n sequence[j], sequence[j+1] = sequence[j+1], sequence[j]", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def bubble_sort(dataset):\n\t# start with array length and decrement each time \n\tarrayLen = len(dataset)\n\tbubbleIndex = len(dataset) - 1\n\twhile bubbleIndex != 0:\n\t\tarrayIndex = 0\n\t\twhile arrayIndex < arrayLen - 1:\n\t\t\tthisVal = dataset[arrayIndex]\n\t\t\tnextVal = dataset[arrayIndex + 1]\n\t\t\tif thisVal > nextVal:\n\t\t\t\tdataset[arrayIndex + 1] = thisVal\n\t\t\t\tdataset[arrayIndex] = nextVal\n\t\t\tarrayIndex += 1\n\t\tprint \"Current State:\", dataset\n\t\tbubbleIndex -= 1", "def timsort(lst):\n sublsts = []\n\n i = 0\n while i < len(lst):\n sublsts.append([lst[i]])\n i += 1\n\n if i < len(lst) and lst[i] >= lst[i - 1]:\n while i < len(lst) and lst[i] >= lst[i - 1]:\n sublsts[-1].append(lst[i])\n i += 1\n elif i < len(lst):\n while i < len(lst) and lst[i] < lst[i - 1]:\n sublsts[-1].append(lst[i])\n i += 1\n\n sublsts[-1] = sublsts[-1][::-1]\n\n return merge_many(*sublsts)", "def pizza_sort(lst):\n length = len(lst)\n def based_god_help_me(lst,index=0):\n if index == length - 1:\n return\n greatest = index_largest(lst[index:]) + index\n lst[greatest], lst[index] = lst[index], lst[greatest]\n based_god_help_me(lst,index+1)\n return based_god_help_me(lst)", "def bubble_sort(array):\n\tupdated = 1\n\tcounter = 0\n\n\twhile(updated == 1):\n\t\tupdated = 0\n\t\t\n\t\tfor i in range(len(array)-1-counter):\n\t\t\tif array[i] > array[i+1]:\n\t\t\t\tarray[i],array[i+1] = array[i+1],array[i]\n\t\t\t\tupdated = 1\n\t\t\n\t\tcounter += 1\n\n\treturn array", "def isort(my_list):\n comparision_count = 0\n for index in range(len(my_list)-1):\n # Picking for each Number\n comparision_count = comparision_count + 1\n if my_list[index+1] < my_list[index]:\n # moving the element\n my_list[index + 1], my_list[index] = my_list[index], my_list[index + 1]\n newposition = index\n\n # Comparing the changed element with the already sorted list\n for i in range(index,0,-1):\n comparision_count = comparision_count + 1\n if my_list[newposition-1] > my_list[i]:\n # moving the element\n my_list[i] , my_list[newposition-1] = my_list[newposition-1] , my_list[i]\n newposition = i-1 # remembering the new position\n else:\n # Since it is a sorted list, breaking loop if condition fails atleast once\n break\n return (my_list , comparision_count)", "def gnomesort(self):\n # nothing to do if we're empty or singleton\n if len(self) < 2:\n return\n # start with second element, and always compare to the element before\n current = self.first.next\n while current is not None:\n # thus current must have a .prev\n # If this element is unsorted with the element before it, then\n if current.prev and current.value < current.prev.value:\n # swap this element with the element before it\n # using insert_after and pop_before is an easy way to handle first/last identities\n self.insert_after(current, self.pop_before(current))\n # and then check the new previous-element.\n else:\n # advance to next node (or None if this is the last node in the list, in which case we terminate)\n current = current.next", "def test_original_unchanged():\n first_list = [28, 3, 4, 10, 8]\n bubble_sort(first_list)\n assert first_list == [28, 3, 4, 10, 8]", "def bubblesort(array):\n j = len(array)\n while j > 
0:\n swapped = False\n for i in range(1, j):\n if array[i-1] > array[i]:\n array[i-1], array[i] = array[i], array[i-1]\n swapped = True\n j -= 1\n if swapped == False:\n return array\n return array", "def touple_bubble_sort(data_touple, sort_by = 0):\n for passnum in range(len(data_touple) - 1, 0, -1):\n is_sorted = True\n for idx in range(passnum):\n if data_touple[idx][sort_by] > data_touple[idx + 1][sort_by]:\n temp = data_touple[idx]\n data_touple[idx] = data_touple[idx + 1]\n data_touple[idx + 1] = temp\n is_sorted = False\n if is_sorted:\n return", "def wiggleSort(self, nums: 'List[int]') -> 'None':\n ns = sorted(nums)\n N = len(ns)//2 + len(ns)%2\n ns1, ns2 = ns[:N][::-1], ns[N:][::-1]\n for i in range(len(ns)//2):\n nums[i*2] = ns1[i]\n nums[i*2+1] = ns2[i]\n if len(ns) % 2 == 1:\n nums[-1] = ns1[-1]", "def custom_sort(vector:list)->list:\n if len(vector) <= 2:\n return vector\n\n else:\n mid = len(vector) // 2\n vector.insert(1, vector.pop(mid))\n i = 1\n for idx in range(2, len(vector)-1, 2):\n vector.insert(idx+1, vector.pop(mid+i))\n i +=1\n return vector", "def bubble_sort(A):\n seguir = True\n largo = len(A)\n while seguir:\n seguir = False\n for i in xrange(largo - 1):\n if A[i] > A[i+1]:\n A[i], A[i+1] = A[i+1], A[i]\n seguir = True", "def __sort(self, _list, _index, desc, pop_first_element=False):\n if _index != 0:\n _list = [(x[_index], x) for x in _list]\n \n _list.sort()\n \n if desc:\n _list.reverse()\n\n if _index != 0 or pop_first_element: \n _list = [x[1] for x in _list]\n\n return _list", "def wiggleSort(self, nums):\n nums2=nums.copy()\n nums2.sort()\n n = len(nums)\n if n%2:\n midpoint = n//2\n else:\n midpoint = n//2-1\n j=n-1\n i=midpoint\n point=0\n while j > midpoint:\n nums[point] = nums2[i]\n nums[point+1] = nums2[j]\n j-=1\n i-=1\n point+=2\n if n%2:\n nums[-1]=nums2[0]\n print(nums)", "def test_sort_reversed():\n reverse_sorted_data = [3, 2, 1]\n sorted_data = bubble_sort(reverse_sorted_data)\n assert sorted_data == [1, 2, 3]", "def sort_1(l):\n pass", "def stooge_sort(arr):\r\n stooge(arr, 0, len(arr) - 1)", "def wiggleSort(self, nums: List[int]) -> None:\n '''\n ## nlogn\n if len(nums)<=1:\n return nums \n nums.sort()\n l = len(nums)\n for i in range(1,l-1,2):\n nums[i],nums[i+1] = nums[i+1],nums[i]\n return nums\n\n '''\n ## O(n)\n l = len(nums)\n for i in range(1, l):\n if i % 2 == 1 and nums[i] < nums[i - 1]:\n nums[i], nums[i - 1] = nums[i - 1], nums[i]\n if i % 2 == 0 and nums[i] > nums[i - 1]:\n nums[i], nums[i - 1] = nums[i - 1], nums[i]", "def test_sort_reversed():\n assert bubble_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]", "def wiggleSort3(self, nums) -> None:\n # Sort the list, and swap adjacent element starting from the second element\n nums.sort()\n i = 1\n while i < len(nums) - 1:\n nums[i], nums[i+1] = nums[i+1], nums[i]\n i += 2", "def bubble_sort(input_array):\n for i in range(0, len(input_array)):\n for j in range(0,len(input_array) - i - 1):\n if input_array[j] > input_array[j+1]:\n tmp = input_array[j]\n input_array[j] = input_array[j+1]\n input_array[j+1] = tmp", "def sort_012(input_list):\r\n \r\n # Positions 0's to the start of the array and 2's to the end.\r\n # All remaining are 1's.\r\n\r\n next_0_index = 0\r\n next_2_index = len(input_list) - 1\r\n\r\n i = 0\r\n \r\n # Traverse the array once.\r\n # Last index to be checked is next_2_index because it is \r\n # one position before the last placed 2.\r\n while i < next_2_index + 1:\r\n\r\n element = input_list[i]\r\n\r\n if element == 0:\r\n\r\n # Position at next_0_index and put 
that element in this 0's place\r\n input_list[i], input_list[next_0_index] = input_list[next_0_index], input_list[i]\r\n\r\n # next_0_index is now taken by a 0, next index is to its right\r\n next_0_index += 1\r\n\r\n # Increment to check next element. All from i and before are 0's\r\n i += 1\r\n\r\n if element == 2:\r\n \r\n # Position at next_2_index and put that element in this 2's place\r\n input_list[i], input_list[next_2_index] = input_list[next_2_index], input_list[i]\r\n \r\n # next_2_index is now taken by a 2, next index is to its left\r\n next_2_index -= 1\r\n \r\n if element == 1:\r\n\r\n # Leave as is and proceed to next. Might be replaced by a 0 later\r\n i += 1\r\n\r\n return input_list", "def test_sort_sorted():\n sorted_data = [1, 2, 3, 4, 5]\n sorted_list = bubble_sort(sorted_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def insertionSort(list):", "def test_bubblesort_on_long_list():\n from bubblesort import bubblesort\n unsorted_list = []\n for i in range(100):\n unsorted_list.append(random.randint(0, 1000))\n\n sorted_list = bubblesort(unsorted_list)\n\n assert sorted_list == sorted(unsorted_list)", "def last2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][-1]", "def wiggleSort(self, nums: List[int]) -> None:\n newlist=sorted(nums)\n nums[::2]=newlist[:int(len(nums)/2)+len(nums)%2]\n nums[1::2]=newlist[int(len(nums)/2)+len(nums)%2:]", "def sort(lst):\n n = len(lst)\n done = False\n round = n - 1\n while not done and round:\n done = True\n for i in range(round):\n if lst[i] > lst[i+1]:\n lst[i], lst[i+1] = lst[i+1], lst[i]\n done = False\n round -= 1", "def bubble_sort(numbers):\n sorted_numbers = numbers.copy()\n swapped = True\n while swapped:\n swapped = False\n for i in range(1, len(sorted_numbers)):\n if sorted_numbers[i - 1] > sorted_numbers[i]:\n sorted_numbers[i], sorted_numbers[i - 1] = \\\n sorted_numbers[i - 1], sorted_numbers[i]\n swapped = True\n return sorted_numbers", "def sort():\n return -1", "def sortTuple(lstTuples, element):\n\n lstTuples.sort(key=lambda x: x[element-1])\n return lstTuples", "def sort_012(input_list):\n # Initial two pointers to beginning and end\n # Start another pointer to traverse\n # if list[curr] == 0, then beg++, travel++\n # if list[curr] == 2, then swap with end, and end--, curr++\n # if list[curr] == 1, curr++\n \n # if list[end] == 2, then end-- (to optimize swaps)\n \n beg = curr = 0\n end = len(input_list) - 1\n while curr <= end:\n if input_list[curr] == 0:\n input_list[curr], input_list[beg] = \\\n input_list[beg], input_list[curr]\n beg += 1\n curr += 1\n elif input_list[curr] == 2:\n input_list[curr], input_list[end] = \\\n input_list[end], input_list[curr]\n end -= 1\n elif input_list[curr] == 1:\n curr += 1\n \n return input_list" ]
[ "0.77857566", "0.7745999", "0.75933444", "0.7433549", "0.7385789", "0.73774844", "0.736563", "0.7351545", "0.73503095", "0.73399705", "0.7298297", "0.7254225", "0.7216236", "0.7211703", "0.7194435", "0.7183924", "0.71444356", "0.71343726", "0.7121137", "0.7121137", "0.71004987", "0.70888084", "0.7075632", "0.70584345", "0.7051523", "0.7033102", "0.70131016", "0.69986415", "0.69961786", "0.69758344", "0.69719064", "0.6957651", "0.69523275", "0.6946385", "0.69355404", "0.691562", "0.68937033", "0.6891742", "0.6864843", "0.6852178", "0.68309", "0.6828028", "0.6823788", "0.68164074", "0.67677045", "0.67579424", "0.6709796", "0.6707329", "0.6705577", "0.6697157", "0.6682835", "0.6635967", "0.6630977", "0.66185045", "0.6610023", "0.65530664", "0.65514266", "0.6543614", "0.65413857", "0.6525408", "0.65174145", "0.65145844", "0.65063107", "0.6497544", "0.6493397", "0.6468949", "0.6467729", "0.6425155", "0.6421218", "0.6406594", "0.63877666", "0.6387507", "0.6377164", "0.6375021", "0.637373", "0.6349991", "0.63423204", "0.6341364", "0.63139856", "0.6312119", "0.6309676", "0.63070756", "0.6306077", "0.6305633", "0.6302087", "0.62954754", "0.6289454", "0.6287294", "0.6283412", "0.62779325", "0.627536", "0.6266298", "0.62655157", "0.6253299", "0.6248905", "0.62126064", "0.6202781", "0.61989224", "0.6180999", "0.61748856" ]
0.6551871
56
slick solution in python ONLY
zeros = [0 for i in range(zeros_and_ones.count(0))]
ones = [1 for j in range(zeros_and_ones.count(1))]
return zeros + ones
def zeros_before_ones(zeros_and_ones):
    # Two-pointer partition: walk inward from both ends, swapping any 1 on
    # the left with any 0 on the right, so all 0s end up before all 1s.
    index_i = 0
    last_index = len(zeros_and_ones) - 1
    while index_i < last_index:
        if zeros_and_ones[index_i] == 0:
            # Left element already in place; advance the left pointer only.
            index_i += 1
        elif zeros_and_ones[last_index] == 1:
            # Right element already in place; retreat the right pointer only.
            last_index -= 1
        else:
            # Left is 1 and right is 0: swap them and move both pointers.
            zeros_and_ones[index_i], zeros_and_ones[last_index] = zeros_and_ones[last_index], zeros_and_ones[index_i]
            index_i += 1
            last_index -= 1
    return zeros_and_ones
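For comparison, a minimal sketch of the counting approach described in the query above (the name zeros_before_ones_counting is invented here for illustration):

def zeros_before_ones_counting(zeros_and_ones):
    # Count the 0s and 1s, then rebuild the list with every 0 before every 1.
    zeros = [0 for _ in range(zeros_and_ones.count(0))]
    ones = [1 for _ in range(zeros_and_ones.count(1))]
    return zeros + ones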
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution2(n):\n ones = 0\n while n > 0:\n if n & 1:\n ones += 1\n n = n >> 1\n\n return 0 if ones % 2 == 0 else 1", "def c(ixs):\n return sum(range(1, sum((i > 0 for i in ixs)) + 1))", "def _iter_restrict(self, zeros, ones):\n inputs = list(self.inputs)\n unmapped = {}\n for i, v in enumerate(self.inputs):\n if v in zeros:\n inputs[i] = 0\n elif v in ones:\n inputs[i] = 1\n else:\n unmapped[v] = i\n vs = sorted(unmapped.keys())\n for num in range(1 << len(vs)):\n for v, val in boolfunc.num2point(num, vs).items():\n inputs[unmapped[v]] = val\n yield sum((val << i) for i, val in enumerate(inputs))", "def addOnes(x,m):\n n = x.size/m\n one = np.ones((m,1))\n x = x.reshape((m,n))\n judge = np.sum(x[:,0] == one.flatten())\n if judge != m:\n x = np.hstack((one,x))\n return x", "def zero_to_ones(L):\n return [-1 if val == 0 else 1 for val in L]", "def matOnes(shape):\n return [[1 for y in range(shape[1])] \\\n for x in range(shape[0])]", "def count_ones(n):\n s = 0\n mask = 1\n for i in xrange(16):\n if (mask << i) & n:\n s += 1\n return s", "def rzeros(nums):\n total = len(nums)\n zeros = 0\n nozeros = []\n for x in nums:\n if x != 0:\n nozeros.append(x) \n else:\n zeros = zeros + 1\n \n return (nozeros, total - zeros, zeros)", "def fn(i, j, mask):\n if j == n: return 1 \n if i == m: return fn(0, j+1, mask)\n ans = 0 \n for x in 1<<2*i, 1<<2*i+1, 0b11<<2*i: \n mask0 = mask ^ x\n if mask0 & 0b11<<2*i and (i == 0 or (mask0 >> 2*i) & 0b11 != (mask0 >> 2*i-2) & 0b11): \n ans += fn(i+1, j, mask0)\n return ans % 1_000_000_007", "def _one_pass(nums):\n pattern = [0, 1, 0, -1]\n return [\n int(str(sum(\n v * pattern[(i // n) % len(pattern)]\n for i, v in enumerate(nums, start=1)\n ))[-1])\n for n in range(1, len(nums) + 1)\n ]", "def zeros(s, zero=0):\n\treturn [zeros(s[1:] ) for i in range(s[0] ) ] if not len(s) else zero", "def zeros(n):\n return [0 for i in range(n)]", "def single_number(nums: List[int]) -> int:\n ones = 0\n twos = 0\n\n for num in nums:\n # Record number that appears twice.\n twos |= (ones & num)\n\n # Record number that appears once.\n ones ^= num\n\n # Remove number that is on ones and twos.\n common_bit_mask = ~(ones & twos)\n ones &= common_bit_mask\n twos &= common_bit_mask\n return ones", "def ones(dice):\n return sum([x for x in dice if x == 1])", "def solution2(array):\n if not array:\n return\n\n n_rows = len(array)\n n_cols = len(array[0])\n\n # Since we are using first row and column as space to mark rows and columns to be zeroed,\n # first check whether they have any zeros or not\n first_row_zero = False\n first_column_zero = False\n \n for c in range(n_cols):\n if not array[0][c]:\n first_row_zero=True\n \n for r in range(n_rows):\n if not array[r][0]:\n first_column_zero = True\n\n # Now start from 1,1 and check for 0. 
If 0 is found in certain row and column, mark first row\n # and column\n for r in range(1, n_rows):\n for c in range(1, n_cols):\n if not array[r][c]:\n array[0][c] = 0\n array[r][0] = 0\n\n # Now iterate through entire matrix again starting from 1,1 and set element to zero if \n # first row or column for that element is zero\n for r in range(1, n_rows):\n for c in range(1, n_cols):\n if array[0][c]==0 or array[r][0]==0:\n array[r][c] = 0\n\n # Go back to the first row and column.\n if first_row_zero:\n for c in range(n_cols):\n array[0][c] = 0\n if first_column_zero:\n for r in range(n_rows):\n array[r][0] = 0\n\n return array", "def solution(A):\n \n cars = 0\n ones = 0\n\n for i in range(len(A), 0, -1):\n\n if A[i-1] == 1:\n ones += 1\n else:\n cars += ones\n\n return (-1 if cars > 1000000000 else cars)", "def _make_zero(p):\n\n return [pi == 0 for pi in p]", "def solve_ok(number: int) -> int:\n return no_ones(number) % 2", "def solution(num):\n \n res = [0] * (num + 1)\n for i in range(1,len(res)):\n res[i] = res[i>>1] + (i%2)\n \n return res", "def duplicateZeros(self, arr: List[int]) -> None:\n zero = 0\n i = 0\n while i + zero < len(arr):\n if arr[i] == 0:\n zero += 1\n i += 1\n \n if i + zero > len(arr):\n arr[-1] = 0\n i -= 1\n zero -= 1\n \n i -= 1\n j = i + zero\n while j >= 0:\n if arr[i]:\n arr[j] = arr[i]\n else:\n arr[j] = 0\n j -= 1\n arr[j] = 0\n j -= 1\n i -= 1", "def ones(cls):\n return super().ones(4, 4)", "def test_count_binary_decisions(self):\n abs_centered_quantized_data_0 = numpy.array([0.75, 0.05, 0.1, 0.2, 0.2, 0.15], dtype=numpy.float32)\n bin_width_test_0 = 0.05\n abs_centered_quantized_data_1 = numpy.array([210., 6., 9., 6.], dtype=numpy.float32)\n bin_width_test_1 = 3.\n truncated_unary_prefix = 7\n \n (cumulated_zeros_0, cumulated_ones_0) = \\\n lossless.stats.count_binary_decisions(abs_centered_quantized_data_0,\n bin_width_test_0,\n truncated_unary_prefix)\n (cumulated_zeros_1, cumulated_ones_1) = \\\n lossless.stats.count_binary_decisions(abs_centered_quantized_data_1,\n bin_width_test_1,\n truncated_unary_prefix)\n print('1st experiment:')\n print('Number of occurrences of 0 for each binary decision computed by the function:')\n print(cumulated_zeros_0)\n print('Number of occurrences of 0 for each binary decision computed by hand:')\n print(numpy.array([0, 1, 1, 1, 2, 0, 0]))\n print('Number of occurrences of 1 for each binary decision computed by the function:')\n print(cumulated_ones_0)\n print('Number of occurrences of 1 for each binary decision computed by hand:')\n print(numpy.array([6, 5, 4, 3, 1, 1, 1]))\n print('\\n2nd experiment:')\n print('Number of occurrences of 0 for each binary decision computed by the function:')\n print(cumulated_zeros_1)\n print('Number of occurrences of 0 for each binary decision computed by hand:')\n print(numpy.array([0, 0, 2, 1, 0, 0, 0]))\n print('Number of occurrences of 1 for each binary decision computed by the function:')\n print(cumulated_ones_1)\n print('Number of occurrences of 1 for each binary decision computed by hand:')\n print(numpy.array([4, 4, 2, 1, 1, 1, 1]))", "def fn(mask, k):\n if not mask: return 0 \n ans = inf \n for i in range(n): \n if mask & (1<<i): \n ans = min(ans, (nums1[i]^nums2[k]) + fn(mask^(1<<i), k+1))\n return ans", "def majority_logical(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = itertools.combinations(bit_arrays, MINIMUM_MAJORITY)\n answer = map(all, answer)\n answer = 
any(answer)\n return answer", "def bool_both_zero_compute(juduged_min, juduged_max):\n dtype = juduged_min.dtype\n tensor_zero = topi.full(juduged_min.shape, dtype, dc.zero_const(dtype))\n min_abs = topi.abs(juduged_min)\n max_abs = topi.abs(juduged_max)\n min_max_replace = topi.add(min_abs, max_abs)\n # just check wether min and max are all zero, if true return 0\n bool_min_max_product_less_zero = less_compare_float32(min_max_replace, tensor_zero)\n bool_min_max_product_more_zero = less_compare_float32(tensor_zero, min_max_replace)\n bool_both_zero = topi.add(bool_min_max_product_less_zero, bool_min_max_product_more_zero)\n\n return bool_both_zero", "def onequbit_modes(statemat):\n nqubit = int(np.log2(statemat.shape[0]))\n rep = np.array(list(itertools.product((0, 1), repeat=nqubit)))\n inds = [i for i, x in enumerate(np.sum(rep, 1)) if x==1]\n \n instates = np.around(statemat[:, inds], 3)\n\n outstates = np.zeros((len(inds), len(inds)), dtype=complex)\n #print(inds)\n for ii in range(len(inds)):\n shortstate = np.around(instates[sum(instates[:,ii].nonzero()), ii], 3).todense()\n outstates[:, ii] = np.squeeze( np.array( shortstate ) )\n \n return outstates", "def duplicateZeros(self, arr: List[int]) -> None:\n i = 0\n j = 0\n n = len(arr)\n while i < n:\n if arr[i] == 0:\n j += 1\n i += 1\n j += 1\n i -= 1\n j -= 1\n while i >= 0:\n if j < n:\n arr[j] = arr[i]\n if arr[i] == 0:\n j -= 1\n if j < n:\n arr[j] = 0\n i -= 1\n j -= 1", "def moveZeroes(self, nums: List[int]) -> None:\n count = 0\n ans = []\n for num in nums:\n if num != 0:\n ans.append(num)\n else:\n count += 1\n for zero in range(count):\n ans.append(0)\n return ans", "def map_zero_one(x, a, b):\n assert b > a\n s = 1./(b - a)\n t = a/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<0] = 0\n return y", "def ones(cls):\n return super().ones(3, 3)", "def andbits(num1,num2):\n thingstoadd=[]\n for i in range(31):\n bit1=setbit(num1,i)\n bit2=setbit(num2,i)\n bit1=shiftleft(bit1,30 - i)\n bit2=shiftleft(bit2,30 - i)\n bitsum=add(bit1,bit2)\n bitsum=setbit(bitsum,31)\n bitsum=shiftright(bitsum,31 - i)\n thingstoadd.append(bitsum)\n bit1=setbit(num1,31)\n bit2=setbit(num2,31)\n bit1=shiftright(bit1,1)\n bit2=shiftright(bit2,1)\n bitsum=add(bit1,bit2)\n bitsum=setbit(bitsum,31)\n thingstoadd.append(bitsum)\n return sum(thingstoadd)", "def compareones_c(w1,w2,tn):\n nw1 = np.int_(np.copy(w1))\n nw2 = np.int_(np.copy(w2))\n code = \"\"\"\n int s;\n s = 0;\n for(int i = 0; i < n; i++)\n {\n if((nw1[i] == 1)&(nw2[i] == 1))\n {\n s += 100;\n }\n }\n return_val = s;\n \"\"\"\n n = len(w2)\n res = inline(code, ['nw1','nw2','n'], headers = ['<math.h>'], compiler = 'gcc')\n return res / float(tn)", "def eqcounter(nodes):\r\n nnodes = nodes.shape[0]\r\n IBC = np.zeros([nnodes, 3], dtype=np.integer)\r\n for i in range(nnodes):\r\n for k in range(3):\r\n IBC[i, k] = int(nodes[i, k+3])\r\n neq = 0\r\n for i in range(nnodes):\r\n for j in range(3):\r\n if IBC[i, j] == 0:\r\n IBC[i, j] = neq\r\n neq = neq + 1\r\n return neq, IBC", "def setZeroes(self, matrix: List[List[int]]) -> None:\r\n \r\n rows, cols = len(matrix), len(matrix[0])\r\n \r\n #This solution has been done in constant space\r\n #using first row and first column for deciding which rows and columns\r\n #should be zero\r\n \r\n #using one extra variable for first row\r\n #because first cell is common in first row and column\r\n first_row = 1\r\n \r\n for i in range(rows):\r\n for j in range(cols):\r\n if matrix[i][j]==0:\r\n #for column\r\n matrix[0][j] = 0\r\n \r\n #for row\r\n if i==0:\r\n 
first_row = 0\r\n else:\r\n matrix[i][0] = 0\r\n \r\n #checking for rows except first row\r\n for i in range(1,rows):\r\n if matrix[i][0] == 0:\r\n for j in range(cols):\r\n matrix[i][j] = 0\r\n \r\n #checking for columns except first column\r\n for j in range(1, cols):\r\n if matrix[0][j] == 0:\r\n for i in range(rows):\r\n matrix[i][j] = 0\r\n \r\n #for first column\r\n if matrix[0][0] == 0:\r\n for i in range(rows):\r\n matrix[i][0] = 0\r\n \r\n #for first row\r\n if first_row == 0:\r\n for j in range(cols):\r\n matrix[0][j] = 0\r\n \r\n #print(matrix)", "def one(x):\n return zero(x) + 1", "def duplicateZeros(self, arr: List[int]) -> None:\n zeros = arr.count(0)\n for i in reversed(range(len(arr))): \n if i + zeros < len(arr): \n arr[i+zeros] = arr[i]\n if arr[i] == 0: \n zeros -= 1\n if i + zeros < len(arr): \n arr[i+zeros] = arr[i]", "def singles(counts):\n return (counts==1).sum()", "def ones(num):\n if num < 1:\n raise IndexError('num must be >= 1.')\n return Vector.fromSequence([1] * num)", "def single_number(nums):\n tmp = 0\n for num in nums:\n tmp ^= num\n marker = 1\n while marker & tmp != marker:\n marker <<= 1\n a = 0\n for num in nums:\n if marker & num:\n a ^= num\n b = tmp ^ a\n return [a, b]", "def ones(n, offset=0):\n\n if (n == float('inf')):\n return -1\n\n return ((1 << n) - 1) << offset", "def moveZeroes(self, nums: List[int]) -> None:\n # Treating the corner case first:\n if len(nums) == 0 or len(nums) == 1:\n return\n # Treating the general cases...\n counter = 0\n for i in range(0, len(nums)):\n if nums[i] == 0:\n counter += 1\n left = 0\n right = 0\n while right < len(nums):\n if nums[left] == 0 and nums[right] == 0:\n right += 1\n elif nums[left] == 0 and nums[right] != 0:\n nums[left] = nums[right]\n left += 1\n right += 1\n elif nums[left] != 0 and nums[right] == 0:\n right += 1\n else:\n nums[left] = nums[right]\n left += 1\n right += 1\n i = -1\n while counter > 0:\n nums[i] = 0\n counter -=1\n i -= 1\n return", "def fn(vals):\n total = odd = 0 \n for i, x in enumerate(vals): \n if vals[0] == x: \n total += 1\n if i&1: odd += 1\n elif vals[0] ^ x != (1 << n) - 1: return inf\n ans = inf \n if len(vals) <= 2*total <= len(vals)+1: ans = min(ans, odd)\n if len(vals)-1 <= 2*total <= len(vals): ans = min(ans, total - odd)\n return ans", "def moveZeroes1(self, nums: List[int]) -> None:\n count = 0 \n for i in range(len(nums)-1) :\n i = i - count\n if nums[i] == 0 :\n count += 1\n nums.pop(i)\n \n nums.extend([0]*count)", "def zeros_like(self):\n raise NotImplementedError", "def set_zero(mn_matrix): \r\n \r\n row_flag = [0]*len(mn_matrix)\r\n column_flag = [0]*len(mn_matrix[0])\r\n for i in range(0, len(row_flag)):\r\n for j in range(0, len(column_flag)):\r\n if mn_matrix[i][j] == 0:\r\n row_flag[i] = 1\r\n column_flag[j] = 1\r\n \r\n for i in range(0, len(row_flag)):\r\n for j in range(0, len(column_flag)):\r\n if row_flag[i] == 1 or column_flag[j] == 1: \r\n mn_matrix[i][j] = 0", "def solution(array):\n rows = array.shape[0]\n cols = array.shape[1]\n result = np.ones((rows,cols))\n result[1:rows-1,1:cols-1] = 0\n return result", "def setZeroes(matrix):\r\n \r\n #An average O(n^2) time traversal solution with memoization\r\n #for each 0 we encounter, we update the entire row and column to 0s, but on the condition that the row/column has not been updated yet\r\n \r\n row_cache = {}\r\n column_cache = {}\r\n \r\n for r in range(0,rows := len(matrix)):\r\n for c in range(0,cols := len(matrix[0])):\r\n \r\n if matrix[r][c] == 0:\r\n \r\n if not row_cache.get(r):\r\n 
for i in range(0,cols):\r\n if matrix[r][i] != 0:\r\n matrix[r][i] = '0' #we use strings so we only consider the initial 0s\r\n row_cache[r] = True\r\n \r\n if not column_cache.get(c):\r\n for i in range(0,rows):\r\n if matrix[i][c] != 0:\r\n matrix[i][c] = '0'\r\n column_cache[c] = True\r\n return", "def solve(given: np.array) -> np.array:\n possible = np.full((9, 9, 9), True)\n mask = given > 0\n possible[mask, :] = False\n possible[mask, given[mask] - 1] = True\n\n # number of possibilities at each site, masking those already propagated\n # to avoid repetitive work. All masked == problem solved\n count = ma.array(possible.sum(axis=2), fill_value=1)\n\n # allocate upfront to as out parameter to np.equal\n # (ma.array because count is ma.array)\n where = ma.array(np.empty((9, 9), dtype=bool), fill_value=False)\n\n stack = [(possible, count)]\n while stack:\n node, count = stack.pop()\n unsolved = propagate(node, count, where)\n if unsolved == -1:\n continue\n if unsolved == 0:\n break\n # try all possibilities from cell with fewest > 1\n i, j = np.unravel_index(count.argmin(), count.shape)\n for k in np.flatnonzero(node[i, j, :]):\n node_copy, count_copy = node.copy(), count.copy()\n node_copy[i, j, :] = False\n node_copy[i, j, k] = True\n count_copy[i, j] = 1\n stack.append((node_copy, count_copy))\n\n i, j, k = node.nonzero()\n count[i, j] = k + 1\n return np.array(count)", "def _get_masks(n_input, n_hiddens, switched_dependencies=False):\n masks = []\n units = [n_input] + n_hiddens + [n_input]\n for layer in range(len(units)-1):\n n_in = units[layer]\n n_out = units[layer+1]\n\n mask = np.ones((n_in, n_out)) if not switched_dependencies else np.zeros((n_in, n_out))\n diagonalzeros = True if layer == len(units)-2 else False\n\n assert n_in % n_out == 0 or n_out % n_in == 0\n if n_out >= n_in:\n k = int(n_out / n_in)\n for i in range(n_in):\n mask[i, :i * k] = 0 if not switched_dependencies else 1\n if diagonalzeros and not switched_dependencies:\n mask[i, i * k:(i + 1) * k] = 0\n if not diagonalzeros and switched_dependencies:\n mask[i, i * k:(i + 1) * k] = 1\n else:\n k = int(n_in / n_out)\n for i in range(n_out):\n mask[(i + 1) * k:, i] = 0 if not switched_dependencies else 1\n if diagonalzeros and not switched_dependencies:\n mask[i * k:(i + 1) * k, i] = 0\n if not diagonalzeros and switched_dependencies:\n mask[i * k:(i + 1) * k, i] = 1\n\n masks.append(mask.astype(np.float32))\n\n return masks", "def ones():\n return Vec2d(1, 1)", "def possibilities(board):\n return board[np.where(board == 0)]", "def duplicateZeros(self, arr: List[int]) -> None:\n \n n = len(arr)\n z = 0\n \n # Decide final point\n for left in range(n):\n if left > n - 1 - z:\n break\n \n if arr[left] == 0:\n if left == n - 1 - z:\n arr[-1] = 0\n n = n - 1\n break\n z += 1\n \n \n # in-place process\n idx = n - 1\n while idx > 0:\n if arr[idx - z] == 0:\n arr[idx], arr[idx - 1] = 0, 0\n z -= 1\n idx -= 1\n else:\n arr[idx] = arr[idx - z]\n idx -= 1", "def fn(i):\n if i == 2*n-1 or ans[i] and fn(i+1): return True \n for x in reversed(range(1, n+1)): \n if x not in ans: \n ii = x if x > 1 else 0 \n if i+ii < 2*n-1 and ans[i] == ans[i+ii] == 0: \n ans[i] = ans[i+ii] = x\n if fn(i+1): return True \n ans[i] = ans[i+ii] = 0", "def threesumzero(a):\n result = set() # set to eliminate deuplicates!\n a.sort() # sort it\n n = len(a) # number of items in a\n for x in a: # for each element in a, see if its part of some triplet\n target_sum = 0 - x # x and other two items must sum to 0, so other two must sum to -x\n i = 0 # 
from beginning\n j = n - 1 # from end\n while i < j:\n if a[i] + a[j] == target_sum: # we found triplet\n result.add( tuple( sorted( [x, a[i], a[j] ] ) ) ) # sort it (for uniqueness) and convert it to tuple as its immutable\n i += 1\n j -= 1\n elif a[i] + a[j] > target_sum:\n j -= 1\n else: # a[i] + a[j] < target_sum\n i += 1\n\n return result", "def _pattern01(width):\n zeroes = core.Constant(0, width)\n return operation.Concat(zeroes, ~zeroes)", "def fn(x, mask):\n ans = size = 0 \n for xx in range(1, 10): \n if not mask & (1 << xx): \n if (x, xx) not in mp or mask & 1 << mp[x, xx]: \n ans += fn(xx, mask^(1<<xx))\n size += 1\n size = 9 - size\n if m <= size <= n: ans += 1\n return ans", "def offspring_fertility(n1=4,n2=4):\n ary = np.zeros( (n1,n2, n1,n2, 3), float )\n for i in range(n1):\n for j in range(n2):\n for k in range(n1):\n for l in range(n2):\n # set group counter to zero (one counter is sufficient)\n gc1 = 0\n for index in [i,j,k,l]: \n if index in [0,1]: gc1+=1\n if gc1==0 or gc1==4:\n ary[i,j,k,l,0] = 1. # set mark at S0\n elif gc1==1 or gc1==3:\n ary[i,j,k,l,2] = 1. # set mark at S2\n else:\n ary[i,j,k,l,1] = 1. # set mark at S1\n return ary", "def fn(mask, j):\n ans = 0 \n for i in range(m): \n if not mask & (1<<i): \n ans = max(ans, fn(mask^(1<<i), j-1) + score[i][j])\n return ans", "def solution(n: int) -> int:\n binary_gap = 0\n count = 0\n # skip the lowest zeros\n while n and (n & 1) == 0:\n n = n >> 1\n while n:\n while n & 1:\n n = n >> 1\n while n and (n & 1) == 0:\n count += 1\n n = n >> 1\n if n & 1 and binary_gap < count:\n binary_gap = count\n count = 0\n return binary_gap", "def majority_bitwise(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = 0\n for bit_array_subset in \\\n itertools.combinations(bit_arrays, MINIMUM_MAJORITY):\n answer |= functools.reduce(operator.and_, bit_array_subset)\n\n return answer", "def moveZeroes(self, nums: List[int]) -> None:\n i = 0\n while i != len(nums) and nums[i]:\n i += 1\n j = i\n while j != len(nums):\n if not nums[j]:\n j += 1\n else:\n nums[i] = nums[j]\n i += 1\n j += 1\n while i != len(nums):\n nums[i] = 0\n i += 1", "def moveZeroes(self, nums: List[int]) -> None:\n i = j = 0\n\n n = len(nums)\n while True:\n while i < n and nums[i]:\n i += 1\n j = max(i, j) + 1\n while j < n and not nums[j]:\n j += 1\n # i points the first zero\n # j points the first non-zero after i\n # between [i, j) are zeros\n if i == n or j == n:\n break\n self.swap(nums, i, j)", "def gen_all_holds(hand):\n without_repeat = []\n mask_seque = list(gen_all_sequences([0,1], len(hand)))\n for dum_i in mask_seque:\n without_repeat.append(())\n \n for dum_i in range(len(mask_seque)):\n for dum_j in range(len(mask_seque[dum_i])):\n if (mask_seque[dum_i][dum_j]==1):\n without_repeat[dum_i]=list(without_repeat[dum_i])\n without_repeat[dum_i].append(hand[dum_j])\n without_repeat[dum_i]=tuple(without_repeat[dum_i])\n \n without_repeat = set(tuple(without_repeat))\n return without_repeat", "def matZeros(shape):\n return [[0 for y in range(shape[1])] \\\n for x in range(shape[0])]", "def moveZeroes(self, nums: List[int]) -> None:\n i = j = 0\n N = len(nums)\n while j < N:\n while j < N and nums[j] == 0:\n j += 1\n if j >= N:\n break\n nums[i] = nums[j]\n i += 1\n j += 1\n while i < N:\n nums[i] = 0\n i += 1", "def solution(A):\n xor = 0\n for item in A:\n xor ^= item\n return xor", "def new_binomial_prefactor(s,l1,l2,PAx,PBx):\n with loops.Scope() 
as L:\n L.total = 0.\n L.t = 0\n for _ in L.while_range(lambda: L.t < s + 1):\n #TEMP TODO rewrite this. The cond_range causes a huge overhead.\n # Try Valeev implementation\n for _ in L.cond_range(((s - l1) <= L.t) & (L.t <= l2)):\n L.total += binomials[l1,s-L.t] * binomials[l2,L.t] * PAx[l1-s + L.t] * PBx[l2 - L.t]\n L.t += 1\n return L.total", "def test_jaccard_index_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = jaccard_index(vector1, vector1)\n score12 = jaccard_index(vector1, vector2)\n score22 = jaccard_index(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def reduce_bits(nums):\n num1, num2 = nums\n result = \"\"\n for index, bit in enumerate(num1):\n result += bit if bit == num2[index] else '-'\n return Term(result)", "def zeroes(m, n):\n\n return [[0] * n for i in range(m)]", "def solution(xs):\n if len(xs)==1:\n return str(xs[0])\n count_neg = 0\n count_pos = 0\n count_zero = 0\n prod_pos = 1\n prod_neg = 1\n max_neg = -float(\"inf\")\n for x in xs:\n if x ==0:\n count_zero+=1\n elif x < 0:\n pos_or_neg = True\n count_neg+=1\n prod_neg *= x\n max_neg = max(max_neg,x)\n elif x > 0:\n count_pos+=1\n pos_or_neg = True\n prod_pos *= x\n if (count_pos == 0 and count_neg == 0) or (count_zero > 0 and count_neg == 1 and count_pos == 0):\n return str(0)\n if count_neg%2!=0:\n count_neg-=1\n prod_neg //=max_neg\n \n if not (count_pos == 0 and count_neg == 0):\n return str(prod_neg*prod_pos)\n return str(0)", "def check(self, i, j):\n possible=np.ones((10), np.int)\n for k in range(0, 9):\n if k==j: continue\n possible[self.a[i][k]]=0\n\n for k in range(0, 9):\n if k==i: continue\n possible[self.a[k][j]]=0\n for a1 in range(0, 3):\n for b1 in range(0, 3):\n if (i//3)*3+a1==i and (j//3)*3+b1==j:\n continue\n possible[self.a[(i//3)*3+a1][(j//3)*3+b1]]=0\n return possible", "def zero_comb(num_list):\n return {tuple(sorted(n)) for n in combinations(num_list, 3) if sum(n) == 0}", "def create(matrix):\n limit_y = len(matrix)\n limit_x = len(matrix[0])\n\n for y in range(1, limit_y):\n bit.create(matrix[y])\n\n for x in range(1, limit_x):\n for y in range(1, limit_y):\n k = y + (y & -y)\n if k < limit_y:\n matrix[k][x] += matrix[y][x]", "def fn(i, s0, s1, c0, c1):\n if s0 > n or s1 > n: return 0 # impossible \n if i == len(balls): return int(c0 == c1)\n ans = 0 \n for x in range(balls[i]+1): \n ans += fn(i+1, s0+x, s1+balls[i]-x, c0+(x > 0), c1+(x < balls[i])) * comb(balls[i], x)\n return ans", "def duplicateZeros(self, arr) -> None:\n tmp = list()\n for i,v in enumerate(arr):\n if v==0:\n tmp.append(0)\n tmp.append(0)\n else:\n tmp.append(v)\n\n arr[i] = tmp[i]", "def onehot(isTrue):\n if isTrue:\n return [1, 0]\n else:\n return [0, 1]", "def get_sum_zero_pairs(numbers):\n numbers = set(numbers)\n numbers = list(numbers)\n pairs_that_add_to_zero = []\n\n for i, item in enumerate(numbers):\n if numbers[i] == len(numbers):\n break\n\n if numbers[i] == 0:\n pairs_that_add_to_zero.append([0, 0]) \n\n for j in range(i+1, len(numbers)):\n total_of_two_items = numbers[i] + numbers[j]\n if (total_of_two_items == 0):\n pairs_that_add_to_zero.append([numbers[i], numbers[j]]) \n\n return pairs_that_add_to_zero", "def same_bits_up(x):\n\n bit_array = get_first(x, get_consecutive_01)\n\n if (bit_array == 0):\n return x\n\n lower, upper = split_bits_bit_array(x, bit_array)\n upper = swap_bits_bit_array(upper, bit_array)\n lower = 
shift_trailing_zeros(lower)\n\n return upper | lower", "def update_gol(arr):\n nxt = np.zeros(arr.shape)\n rows,cols = nxt.shape\n for i in range(rows):\n for j in range(cols):\n nn = sum_vonneuman_nn(arr,i,j)\n if arr[i][j]==1:\n if nn==2 or nn==3:\n nxt[i][j]=1\n else:\n if nn==3:\n nxt[i][j]=1\n return nxt", "def setZeroes(self, matrix: List[List[int]]) -> None:\n if not matrix:\n return\n row = set()\n col = set()\n rowl = len(matrix[0])\n for i, r in enumerate(matrix):\n for j, c in enumerate(r):\n if c == 0:\n row.add(i)\n col.add(j)\n \n for r in row:\n matrix[r] = [0]*rowl\n for j in col:\n for i in range(len(matrix)):\n matrix[i][j] = 0", "def moveZeroes(self, nums: List[int]) -> None:\n zeros=0\n for i in range(len(nums)):\n if nums[i]==0:\n zeros+=1\n else:\n nums[i-zeros]=nums[i]\n for i in range(len(nums)-zeros,len(nums)):\n nums[i]=0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m = len(matrix)\n n = len(matrix[0])\n \n row_1st_0 = False\n col_1st_0 = False\n \n ## check whether 1st row and 1st col contains 0\n for i in range(n):\n if matrix[0][i] == 0:\n row_1st_0 = True\n break\n for j in range(m):\n if matrix[j][0] == 0:\n col_1st_0 = True\n break\n \n ## check row 1:m and col 1:n, store the 0 indicator to 1st row and 1st col\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][j] == 0:\n matrix[i][0] = 0\n matrix[0][j] = 0\n \n ## update cols to zero except 1st row\n for i in range(1, n):\n if matrix[0][i] == 0:\n for r in range(1, m):\n matrix[r][i] = 0\n \n ## update rows to zero except 1st col\n for j in range(1, m):\n if matrix[j][0] == 0:\n matrix[j][1:n] = [0 for i in range(n-1)]\n \n ## update 1st row and 1st col to 0\n if row_1st_0:\n matrix[0][0:n] = [0 for i in range(n)]\n if col_1st_0:\n for r in range(m):\n matrix[r][0] = 0", "def subsequent_mask(lens):\n bs, max_len = len(lens), max(lens)\n mask = torch.ones([bs, max_len, max_len]).tril_(0)\n mask = mask > 0\n return mask", "def TwoComp(n):\n\n l = list(n)\n for i in range(len(l)):\n l[i] = \"0\" if l[i] == \"1\" else \"1\"\n return BitAdd(\"\".join(l), \"1\", len(l))", "def measure_all_1(n, state):\n state = state.copy()\n\n outs = ''\n for i in range(n):\n out = measure_single(n, state,\n i) # After measuring bit0, bit0 collapses. It affects the subsequent bit1 measurement, but does not affect the 1000 independent measurements of the upper layer\n outs = str(out) + outs # from low bit to high bit\n return outs", "def binary_add(x, y):\n # Makes sure that the arrays have the same length.\n # Could be changed to padding on extra zeroes, if so desired.\n assert(len(x) == len(y))\n\n z = [0] * (len(x)+1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n # Makes sure that the array is a binary array.\n # Strictly speaking, not necessary. 
But nice.\n if i not in [0, 1]: return False\n if j not in [0, 1]: return False\n\n # if i and j are both 1 \n if i and j:\n z[a] += 0\n z[a+1] += 1\n # if only one of them is 1\n elif i or j:\n z[a] += 1\n # if they're both 0\n else: pass\n\n if z[a] == 2:\n z[a+1] += 1\n z[a] -= 2\n \n return z[::-1]", "def moveZeroes(self, nums: List[int]) -> None:\n ##brute force\n # counts= nums.count(0)\n # while 0 in nums: nums.remove(0)\n # nums+=[0]*counts\n \n ## two pointer (swapping)\n if not nums: return None\n anchor, explore= 0, 0\n while explore <len(nums):\n if nums[explore]!=0 and explore!=anchor:\n temp= nums[anchor]\n nums[anchor]=nums[explore]\n nums[explore]=temp\n if nums[anchor]!= 0:\n anchor+=1\n explore+=1", "def _get_possible_outcomes(m, bits):\n\n # This is filled with loads of dirty binary tricks...You have been warned\n\n size = max(m.shape) # Max of shape to account for bra or ket\n nqubits = int(math.log(size, 2) + .1) # Number of qubits possible\n\n # Make the output states and put in output_matrices, nothing in them now.\n # Each state will represent a possible outcome of the measurement\n # Thus, output_matrices[0] is the matrix which we get when all measured\n # bits return 0. and output_matrices[1] is the matrix for only the 0th\n # bit being true\n output_matrices = []\n for i in range(1 << len(bits)):\n output_matrices.append(zeros(2**nqubits, 1))\n\n # Bitmasks will help sort how to determine possible outcomes.\n # When the bit mask is and-ed with a matrix-index,\n # it will determine which state that index belongs to\n bit_masks = []\n for bit in bits:\n bit_masks.append(1 << bit)\n\n # Make possible outcome states\n for i in range(2**nqubits):\n trueness = 0 # This tells us to which output_matrix this value belongs\n # Find trueness\n for j in range(len(bit_masks)):\n if i & bit_masks[j]:\n trueness += j + 1\n # Put the value in the correct output matrix\n output_matrices[trueness][i] = m[i]\n return output_matrices", "def moveZeroes(self, nums: List[int]) -> None:\n N = len(nums)\n l = 0\n r = 0\n while r < N:\n if nums[l] == 0 and nums[r] != 0:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n elif nums[r] == 0:\n r += 1\n else:\n l += 1\n r += 1", "def move_zeros(items: list) -> Iterable[int]:\n return [x for x in items if x != 0] + [x for x in items if x == 0]", "def invert_zero_one(sequence):\n return [1 - code for code in sequence]", "def _make_masks(ilens, olens):\n # (B, T_in)\n in_masks = make_non_pad_mask(ilens)\n # (B, T_out)\n out_masks = make_non_pad_mask(olens)\n # (B, T_out, T_in)\n\n return paddle.logical_and(\n out_masks.unsqueeze(-1), in_masks.unsqueeze(-2))", "def fn(i, j, mv):\n if not (0 <= i < m and 0 <= j < n): return 1 \n if mv == 0: return 0\n return (fn(i-1, j, mv-1) + fn(i, j-1, mv-1) + fn(i, j+1, mv-1) + fn(i+1, j, mv-1)) % 1_000_000_007", "def moveZeroes(self, nums: List[int]) -> None:\n n = len(nums)\n i, j = 0, 0\n # i记录最靠前的0的位置\n # j记录从i开始第一个非0的位置\n while j < n:\n while i < n-1 and nums[i] != 0:\n i += 1\n j = i + 1\n while j < n and nums[j] == 0:\n j += 1\n if j > n-1:\n break\n nums[i], nums[j] = nums[j], nums[i]\n\n i += 1", "def moveZeroes(self, nums: List[int]) -> None:\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1", "def triplets():\n for a in xrange(1, 1000):\n for b in xrange(a, 1000):\n c = 1000 - (a + b)\n if a**2 + 
b**2 == c**2:\n return a, b, c", "def setZeroes(self, matrix: List[List[int]]) -> None:\n \n m = len(matrix)\n n = len(matrix[0])\n \n i_zeros = []\n j_zeros = []\n \n for i in range(m): ## find the zeros\n for j in range(n):\n \n if matrix[i][j] == 0:\n i_zeros.append(i)\n j_zeros.append(j)\n \n for i in i_zeros:\n matrix[i] = [0]*n\n \n for j in j_zeros:\n for i in range(m):\n matrix[i][j] = 0", "def solution(data):\n\t\tif data:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def osd(counts):\n return (counts!=0).sum(), (counts==1).sum(), (counts==2).sum()" ]
[ "0.6483679", "0.624784", "0.61705774", "0.610907", "0.6090902", "0.6031265", "0.59823155", "0.59540856", "0.59308344", "0.59170455", "0.5842495", "0.5830201", "0.5826083", "0.5814207", "0.578468", "0.57763344", "0.5771295", "0.57675946", "0.5759372", "0.5745411", "0.5724746", "0.5723461", "0.5713244", "0.5685539", "0.5685301", "0.5676476", "0.5657328", "0.56461656", "0.5642397", "0.5629498", "0.56216425", "0.5609396", "0.55813867", "0.557257", "0.556937", "0.55691636", "0.5566569", "0.55565035", "0.5550816", "0.5531341", "0.55044776", "0.5493055", "0.5492269", "0.54828584", "0.5479583", "0.54754764", "0.5475407", "0.5469933", "0.54697853", "0.54606646", "0.54596263", "0.5455562", "0.54481804", "0.54436356", "0.5443365", "0.54360634", "0.54301465", "0.542594", "0.542502", "0.5422397", "0.5417279", "0.541509", "0.54096484", "0.5397182", "0.53933704", "0.5388943", "0.53859484", "0.5375774", "0.53723276", "0.5371568", "0.53702945", "0.53677887", "0.53643405", "0.53555745", "0.5353314", "0.53506994", "0.53493553", "0.53482103", "0.53469527", "0.53461796", "0.5326446", "0.5321244", "0.53176385", "0.53139806", "0.5308967", "0.5308668", "0.5308062", "0.530451", "0.5302518", "0.52998793", "0.52930075", "0.52881587", "0.5285078", "0.52850354", "0.52846307", "0.52832174", "0.5279823", "0.52709585", "0.5270728", "0.5264962" ]
0.6798596
0
Implements a descending exponential cooling scheme. The temperature decays toward zero as the iterations pass.
def update_temperature(self):
    self.iteration += 1
    self.T = self.T0 * 0.9935**self.iteration
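For reference, a minimal standalone sketch of the same geometric cooling rule. The 0.9935 decay factor comes from the record above; the helper name, the T0 value, and the 1% threshold are illustrative assumptions. It shows that the temperature never reaches exactly zero but falls below 1% of T0 after roughly 707 iterations.

import math

# Standalone sketch of the geometric cooling rule above; only the 0.9935
# factor is taken from the record, everything else is illustrative.
def cooled_temperature(T0, iteration):
    # T never reaches exactly zero; it decays geometrically toward it.
    return T0 * 0.9935 ** iteration

# Iterations needed to drop below 1% of the starting temperature:
# n = ceil(ln(0.01) / ln(0.9935)) == 707
n = math.ceil(math.log(0.01) / math.log(0.9935))
assert cooled_temperature(100.0, n) < 1.0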
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def constant_temp(self, numIterations):\n return 1 + self.alpha", "def _etaE_cool(self,x):\n return self._eta_sfr_scaling(x,'E_cool')", "def _etaE(self,x):\n return self._etaE_cool(x) + self._etaE_hot(x)", "def sim_alternating_exp():\n catches = 0\n for _ in range(100000):\n j = np.random.uniform()*1000\n # j = np.random.exponential(500)\n t_i = 0\n i = 0\n while t_i < j+100:\n if i % 2 == 1:\n t_i += np.random.exponential(10)\n else:\n t_i += np.random.exponential(20)\n if j < t_i and t_i < j+1:\n catches += 1\n i += 1\n print(catches/100000)", "def simpleCool(T0, i):\n\talpha = 0.99995\n\treturn (alpha ** i) * T0", "def activation(self,z):\n #using the clipping function as there is no need for it because the values larger or smaller would be very close to zero\n return 1./(1. + np.exp(-np.clip(z,-250,250)))", "def exponential(min_iterations, i, start = start_temp, final = final_temp):\n\n\ttemperature = (start * (final / start) ** (i / min_iterations))\n\n\treturn temperature", "def linear_decrease_temp(self, numIterations):\n\n denom = numIterations*self.alpha\n return self.maxCount/denom if denom > 0 else 0", "def cool(self):\n self.t = self.t - 1", "def temp(self, numIterations):\n return self.linear_decrease_temp(numIterations)", "def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)", "def exponentialLearningRate(base):\n def function(t):\n return base ** (t-1)\n return function", "def activation(self, z):\r\n denominator = 1 + np.exp(-z)\r\n result = 1/denominator\r\n return result", "def exponentialMovingAverage(self,new,old, alpha):\n return alpha * new + (1 - alpha)* old", "def exp_warmup(base_value, max_warmup_iter, cur_step):\n if max_warmup_iter <= cur_step:\n return base_value\n return base_value * math.exp(-5 * (1 - cur_step/max_warmup_iter)**2)", "def _etaE_hot(self,x):\n return self._eta_sfr_scaling(x,'E_hot')", "def _decay_rate_pow(i: int, exponent: float = 0.8) -> float:\n t = jnp.array(i, jnp.float32) + 1.0\n return 1.0 - t**(-exponent)", "def evaluate(self, _t):\n\n temp = self.init_temp*(self.decay**_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp", "def cubic_evolve(self,nt=1):\n #loop through time steps\n for l in range(nt):\n # temporary array\n y_temp = np.zeros(self.y.shape[0])\n # loop through array\n for i in range(self.y.shape[0]):\n # idx left to departure point\n x_dep = self.x[i]-self.u[i]*self.dt\n j = int(np.floor(x_dep/self.dx))\n # alpha\n a = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n # calculate next time step\n f = lambda x: x % self.y.shape[0] if x >= self.y.shape[0] else x\n y_temp[i] = - a * (1-a)*(2-a)/6 * self.y[f(j-1)]\n y_temp[i] += (1-a**2)*(2-a)/2 * self.y[f(j)]\n y_temp[i] += a*(1+a)*(2-a)/2 * self.y[f(j+1)]\n y_temp[i] -= a*(1-a**2)/6 * self.y[f(j+2)]\n self.y = np.copy(y_temp)\n return self.y", "def linear_decay(x0, alpha, T, t):\n if t <= T:\n return x0 - (1 - alpha) * x0 * t / T\n else:\n return alpha * x0", "def evaluate(self, _t):\n\n temp = self.init_temp - (self.decay*_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp", "def test_exp_decay(self, alpha: float):\n x = np.linspace(0, 1, 100)\n y = np.exp(alpha * x)\n\n alpha_guess = guess.exp_decay(x, y)\n\n self.assertAlmostEqualAbsolute(alpha_guess, alpha)", "def activation_function(self, z):\n return 1. / (1. 
+ np.exp(-np.clip(z, -250, 250)))", "def sigmoid_decay(ep, static =5, k=5):\n static = static\n if ep < static:\n return float(1.)\n else:\n ep = ep - static\n factor = k/(k + np.exp(ep / k))\n return float(factor)", "def next(self, dt):\n self.x = self.x + \\\n (self.rate-0.5*self.vola*self.vola)*dt + \\\n sqrt(dt)*self.vola*np.random.normal()\n return exp(self.x)", "def safe_cumprod(x, eps):\n return torch.exp(exclusive_cumsum(torch.log(torch.clamp(x, min=eps, max=1.0))))", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def elo(old, exp, score, k=10):\n return old + k * (score - exp)", "def probability(delta_cost: float, temperature: float, k: float = 1) -> float:\n if delta_cost < 0:\n return 1\n else:\n return np.exp(-delta_cost / (k * temperature))", "def forward(self):\n self.iteration_number += 1\n x = self.x\n self.x = self.alpha * self.x + self.betta\n t = x - self.x\n\n return (t * t).sum()", "def powAlpha( n ):\n return (1-betaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * betaval", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def exponential(self, data=[], init_lambdas=[1,0.75], max_iteration=500):\r\n xaxis = np.arange(1, len(data)+1)\r\n data = np.array(data)\r\n idx = 1\r\n lambdas = np.array(init_lambdas)\r\n while idx < max_iteration:\r\n y = [lmbda*np.exp(data*(-lmbda)) for lmbda in lambdas]\r\n weights = y/np.sum(y, axis=0)\r\n coefficients = np.mean(weights, axis=1)\r\n lambdas = np.sum(weights, axis=1)/np.sum(weights*data, axis=1)\r\n idx+=1 \r\n print lambdas, coefficients\r\n return lambdas, coefficients", "def _func_pen(self, coeffs_ext):\n l_elastic_net = self.l_elastic_net\n eta = self.eta\n n_features = self.n_features\n coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]\n return l_elastic_net * ((1. 
- eta) * coeffs_ext.sum()\n + 0.5 * eta * np.linalg.norm(coeffs) ** 2)", "def oactivate(self,x):\n value = 1 / (1 + math.exp(-x))\n if value > 0.5:\n return 1\n else:\n return -1", "def sigmoidal(min_iterations, i, start = start_temp, final = final_temp ):\n\n\t# to prevent a math overflow a scale (x^(1/ (i - min_iterations))) is used\n\ttemperature = final + ((start - final)**( 1/ (i - min_iterations))) / \\\n\t\t\t\t\t(1 **(i - min_iterations)) + math.exp(0.3 * ((i - min_iterations / 2) /(i - min_iterations)))\n\n\treturn temperature", "def exponential_window_lssm(self):\n self.A = np.array([self.decay])\n self.s = np.ones(1)\n self.C = np.ones(1)", "def set_coefs_decay(self):\n wt_phenotype = self.epistasis.values[0]\n for order in range(1, self.epistasis.order + 1):\n # Get epistasis map for this order.\n em = self.epistasis.get_orders(order)\n index = em.index\n\n # Randomly choose values for the given order\n vals = 10**(-order) * np.random.uniform(-wt_phenotype,\n wt_phenotype,\n size=len(index))\n\n # Map to epistasis object.\n self.epistasis.data.values[index[0]: index[-1] + 1] = vals\n self.build()\n return self", "def activate(self,x):\n return 1 / (1 + np.exp(-x))", "def schechter(l,alpha):\n return exp(-l)*(l**alpha)", "def exp_func(x, initial, lifetime):\n return initial * np.exp(-x/lifetime)", "def activate_der(self):\r\n\t\treturn self.value * (1 - self.value)", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def test_exp_decay_with_invalid_y(self):\n x = np.array([9.0e-06, 1.9e-05, 2.9e-05, 3.9e-05])\n y = np.array([0.16455749, 0.07045296, 0.02702439, -0.00135192])\n\n # The last point is excluded. 
This point might be some artifact due to filtering.\n alpha_guess = guess.exp_decay(x, y)\n\n np.testing.assert_almost_equal(alpha_guess, -90326, decimal=0)", "def _etaZ(self,x):\n return self._eta_sfr_scaling(x,'Z_cool')+self._eta_sfr_scaling(x,'Z_hot')", "def test_exp_osci_decay(self, alpha, freq):\n x = np.linspace(0, 1, 100)\n y = np.exp(alpha * x) * np.cos(2 * np.pi * freq * x)\n\n alpha_guess = guess.oscillation_exp_decay(x, y)\n\n self.assertAlmostEqualAbsolute(alpha_guess, alpha)", "def update_exponential(self, Z, eta, BDpair=None):\r\n if eta == 0:\r\n return\r\n if BDpair:\r\n B, D = BDpair\r\n else:\r\n D, B = self.opts['CMA_eigenmethod'](self.C)\r\n D **= 0.5\r\n Csi = dot(B, (B / D).T)\r\n Cs = dot(B, (B * D).T)\r\n self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)), self.opts['CMA_eigenmethod']), Cs))", "def _etap(self,x):\n return self._eta_sfr_scaling(x,'p_cool') + self._eta_sfr_scaling(x,'p_hot')", "def activation_func(x):\r\n a = -1\r\n return 1/(1+np.exp(-a*x))", "def adjust_cost(self) -> None:\n\n n_iterations = self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )", "def reweight_distribution(original_distribution, temperature=0.5):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n distribution = np.log(original_distribution) / temperature\n distribution = np.exp(distribution)\n return 
distribution / np.sum(distribution)", "def exp(X):\n X = np.maximum(X,100)\n return np.exp(X)", "def _cross_over(self,mp,cross_rate,eta):", "def exponential_decay(param: float, decay_factor: float, min_val: float) -> float:\n if param > min_val:\n param *= decay_factor\n param = max(param, min_val)\n return param", "def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,\n staircase=False, name=None):\n with ops.op_scope([learning_rate, global_step, decay_steps, decay_rate],\n name, \"ExponentialDecay\") as name:\n learning_rate = ops.convert_to_tensor(learning_rate, name=\"learning_rate\")\n dtype = learning_rate.dtype\n global_step = math_ops.cast(global_step, dtype)\n decay_steps = math_ops.cast(decay_steps, dtype)\n decay_rate = math_ops.cast(decay_rate, dtype)\n p = global_step / decay_steps\n if staircase:\n p = math_ops.floor(p)\n return math_ops.mul(learning_rate, math_ops.pow(decay_rate, p), name=name)", "def cont_ret(self, rate, T, t = 0):\n return np.exp(rate * (T - t))", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def wce(B):\n return eme*B", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def zeta(self):\r\n raise NotImplementedError('not implemented yet')", "def half_space_cooling_temperature(z, t, T1, T0, kappa):\n return [math.erfc(z1 / (2 * numpy.sqrt(kappa * t))) * (T0 - T1) + T1 for z1 in z]", "def BernoulliExponentialLoss(lamb) :\n def bexl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = K.sum((-1./lamb) + K.log(lamb) - 1, axis=-1)\n return recon+dkl\n return bexl", "def _exponential_backoff(backoff=0.1, max_delay=5):\n attempt = 0\n while True:\n delay = backoff * (2 ** attempt)\n if delay > max_delay:\n \"\"\"prevent redundant calculations\"\"\"\n break\n attempt += 1\n yield delay\n while True:\n yield max_delay", "def evaluate(self, _t):\n\n temp = self.init_temp*np.exp(-1.0*self.exp_const*_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp", "def DyCheat(x, t, T0, r2, K, alpha, n):\n D=dmax*x[0]**n/(x[0]**n+K**n)\n #defune the degradation effect\n deg=0\n #define ODEs\n y=np.zeros([np.size(x)])\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r2*(1-x[1])-D-alpha)#d Co/dt\n \n return y", "def erfc(x):\n return 0.0", "def sim_an_exp(neighborhood, Tmax, Tmin, iterations):\n\n temp = Tmax\n\n # set iteration number to 0 at start\n n = 0\n\n plot_list = []\n\n cooling_rate = float(Tmin/Tmax)**(1/iterations)\n\n current_costs = neighborhood.get_total_costs()\n\n while (n < iterations):\n temp = Tmax * (float(cooling_rate)**float(n))\n\n swap_succes = False\n while not swap_succes:\n cable_1 = random.choice(neighborhood.cables)\n cable_2 = random.choice(neighborhood.cables)\n swap_succes = neighborhood.swap_connection(cable_1, cable_2)\n\n new_costs = neighborhood.get_total_costs()\n if (acceptance_probability(current_costs, new_costs, temp) > random.random()):\n current_costs = new_costs\n else:\n cable_1 = 
neighborhood.cables[-1]\n cable_2 = neighborhood.cables[-2]\n neighborhood.swap_connection(cable_1, cable_2)\n\n plot_list.append(current_costs)\n n += 1", "def cost_derivative(output_activations, y):\n return (output_activations - y)", "def noiseless_function(x):\n return 1/(1+np.exp(-x+5))-0.5", "def _auto_mix(self):\n gamma = np.random.choice([0.5, 1, 1.5, 2])\n offset = np.random.uniform(0, self.n * self.dt)\n empty_window = np.random.uniform(0, self.n * self.dt * 0.5)\n self.t.sort()\n self.t[2 * (self.n // 5):4 * (self.n // 5)] *= gamma\n self.t[3 * (self.n // 5):] += empty_window\n self._add_irregularities(epsilon=np.random.normal(0,\n 0.05 * self.dt,\n self.n))\n self.t.sort()\n self._normalize(offset=offset)", "def _etaM_cool(self,x):\n return self._eta_sfr_scaling(x,'M_cool')", "def exp_decr_lr():\n minlr_div_maxlr = tf.divide(min_lr, max_lr)\n power_iter = tf.divide(global_step, num_iters)\n pow_div = tf.pow(minlr_div_maxlr, power_iter)\n return tf.multiply(max_lr, pow_div, name=name)", "def anneal_temp(self, T):\n new_T = self.anneal_rate * T\n return(new_T)", "def hot_cold_process(\n curr_yr,\n p_cold_rolling_steel,\n assumptions\n ):\n # Reduce demand depending on fraction of hot and cold steel rolling process\n p_cold_rolling_by = assumptions.p_cold_rolling_steel_by\n p_hot_rolling_by = 1.0 - p_cold_rolling_by\n\n # Current year fractions\n p_cold_rolling_cy = p_cold_rolling_steel[curr_yr]\n p_hot_rolling_cy = 1 - p_cold_rolling_cy\n\n # Efficiencies of processes\n eff_cold = assumptions.eff_cold_rolling_process\n eff_hot = assumptions.eff_hot_rolling_process\n\n p_by = p_cold_rolling_by * eff_cold + p_hot_rolling_by * eff_hot\n p_cy = p_cold_rolling_cy * eff_cold + p_hot_rolling_cy * eff_hot\n\n factor = p_cy / p_by\n\n return factor", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def optimal_alpha():\n\n # When I checked all of alphas, -0.01 was the best\n alpha = -0.01\n # np.random.choice([-0.06, -0.01, 0.04, 0.1])\n return alpha", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def empirical_erm(self):\n return lambda samples: np.mean(samples) - 0.5 * self.alpha * np.var(samples)", "def activate(self, x):\n\n return 1.0 / (1 + np.exp(-x))", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def _etaM(self,x):\n return self._etaM_cool(x) + self._etaM_hot(x)", "def activation(x):\n return 1 / (1 + torch.exp(-x))", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def E_generator(beta, eta, h):\n def E(x, y):\n \"\"\"Calculate energy for matrices x, y.\n\n Note: the computation is not localized, so this is quite expensive.\n \"\"\"\n # sum of products of neighboring paris {xi, yi}\n xxm = np.zeros_like(x)\n xxm[:-1, :] = x[1:, :] # down\n xxm[1:, :] += x[:-1, :] # up\n xxm[:, :-1] += x[:, 1:] # right\n xxm[:, 1:] += x[:, :-1] # left\n xx = np.sum(xxm * x)\n xy = np.sum(x * y)\n xsum = np.sum(x)\n return h * xsum - beta * xx - eta * xy\n\n def is_valid(i, j, shape):\n \"\"\"Check 
if coordinate i, j is valid in shape.\"\"\"\n return i >= 0 and j >= 0 and i < shape[0] and j < shape[1]\n\n def localized_E(E1, i, j, x, y):\n \"\"\"Localized version of Energy function E.\n\n Usage: old_x_ij, new_x_ij, E1, E2 = localized_E(Ecur, i, j, x, y)\n \"\"\"\n oldval = x[i, j]\n newval = oldval * -1 # flip\n # local computations\n E2 = E1 - (h * oldval) + (h * newval)\n E2 = E2 + (eta * y[i, j] * oldval) - (eta * y[i, j] * newval)\n adjacent = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n neighbors = [x[i + di, j + dj] for di, dj in adjacent\n if is_valid(i + di, j + dj, x.shape)]\n E2 = E2 + beta * sum(a * oldval for a in neighbors)\n E2 = E2 - beta * sum(a * newval for a in neighbors)\n return oldval, newval, E1, E2\n\n return E, localized_E", "def apply_temperature(prob, temperature):\r\n # Apply temperature\r\n if temperature != 1:\r\n # Inverse sigmoid\r\n x = -np.log(1 / prob - 1)\r\n # Apply temperature to sigmoid function\r\n prob = 1 / (1 + np.exp(-x / temperature))\r\n return prob", "def true_y_exp(x):\n y = torch.exp(x)#torch.sigmoid(x * 5) * 2 #exp(x)\n return y", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))", "def fun_decay_exp_gen(r0):\n def fit_fun(p,r):\n return (p[2] ) * np.exp(-(r-r0)/p[0] ) * np.cos(p[1]*r + p[3]) + (r)*p[4] + p[5]\n return fit_fun", "def InitialCondition():\n maxX = getX(C.N + 1,C.N+1,C.alpha_max)\n y0 = np.zeros(maxX,dtype=complex)\n for i in range(0, C.N+2):\n for j in range(0, C.N+2):\n for alpha in [1]:\n\n X = getX(i, j, alpha)\n\n y0[X] = 1./2./C.N * (1-delta(i, C.N+1))*(1-delta(j, C.N+1))+1./2*delta(i, C.N+1)*delta(j, C.N+1) +\\\n 1./2./(C.N)**0.5 * ((1-delta(i, C.N+1)) *\n delta(j, C.N+1)+(1-delta(j, C.N+1))*delta(i, C.N+1))", "def calculate_probability(energy, new_energy, temperature):\n if new_energy < energy:\n return 1.0\n else:\n return exp(float(energy - new_energy) / temperature)", "def looptcs(self): \n while self.itr < 1: \n #self.genRandomNoise() #developing\n self.tcs(lpf=1)\n #self.itr +=1 ", "def cool_down(heatmap, amount):\n heatmap[heatmap <= amount] = 0\n heatmap[heatmap > amount] -= amount\n return heatmap", "def exponential_decay(mul, base, n, min=0.1):\n return np.vstack([mul * np.power(base, np.arange(n)), [min] * n]).max(0).tolist()", "def decay(time_, max_time, coeff):\n threshold = max_time - time_\n if threshold < 0:\n threshold = 0\n return 1 + threshold * coeff / max_time", "def expansion(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_tp = liq_g(1,1,temp,pres)\n alpha = g_tp / g_p\n return alpha" ]
[ "0.65226835", "0.63014704", "0.613545", "0.6096842", "0.60904115", "0.6022001", "0.59910226", "0.5871977", "0.58546734", "0.5735474", "0.5727299", "0.5672892", "0.5629865", "0.560521", "0.55958956", "0.558111", "0.5577392", "0.5571009", "0.5570365", "0.5565791", "0.5564864", "0.55440617", "0.5510702", "0.5491956", "0.54869795", "0.5478759", "0.54577774", "0.5449468", "0.54316163", "0.5416269", "0.54105544", "0.5399458", "0.53962564", "0.53885627", "0.5387525", "0.53819174", "0.5378591", "0.5376297", "0.53761387", "0.53750753", "0.5365944", "0.53625727", "0.53605545", "0.5355149", "0.5348292", "0.5330904", "0.53304744", "0.5320096", "0.53185844", "0.5316523", "0.53077245", "0.530181", "0.52954644", "0.5283666", "0.52827805", "0.52816284", "0.52811813", "0.5279139", "0.52788097", "0.52787495", "0.5277568", "0.5270399", "0.52694213", "0.52615285", "0.5247024", "0.5232637", "0.5223291", "0.52184063", "0.5216903", "0.5207022", "0.5206471", "0.5206335", "0.52059746", "0.5198742", "0.51969945", "0.51948285", "0.5194112", "0.5192266", "0.5191623", "0.5191507", "0.5190452", "0.5187025", "0.5186606", "0.5183956", "0.5180515", "0.51804", "0.5179708", "0.5179696", "0.51779604", "0.5175868", "0.5171944", "0.5171571", "0.5167037", "0.5164892", "0.5162909", "0.5162442", "0.51595646", "0.5158774", "0.5151048", "0.5147539" ]
0.5585291
15
Calculates the new and old K and accepts solutions that improve on the current one. Worse solutions are also sometimes accepted, with a probability that depends on the current temperature.
def check_solution(self, potential_solution):
    old_k = self.K
    new_k = potential_solution.set_K(self.len_connections)

    # calculate the probability of accepting this solution
    delta = new_k - old_k
    if delta >= 0:
        probability = 1
    else:
        probability = np.exp(delta / self.T)

    # pull a random number between 0 and 1 and see if we accept the solution
    if random.random() < probability:
        self.column1.append(self.iteration)
        self.column2.append(new_k)
        self.state = potential_solution
        self.K = new_k

        # save progress to a csv file
        with open('annealing.csv', 'w', newline='') as csv_file:
            fieldnames = ['Iterations', 'K']
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            writer.writeheader()
            for i, j in zip(self.column1, self.column2):
                writer.writerow({'Iterations': i, 'K': j})

    # update the temperature
    self.update_temperature()
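The acceptance rule above is the Metropolis criterion for a maximization objective. A minimal sketch follows; the standalone function name and signature are assumptions, not the original API.

import math
import random

# Illustrative sketch of the acceptance rule used in check_solution above.
def accept(delta, temperature):
    # Improvements (delta >= 0) are always taken; worse solutions are taken
    # with probability exp(delta / T), which shrinks as T cools.
    probability = 1.0 if delta >= 0 else math.exp(delta / temperature)
    return random.random() < probability

Because the exponential schedule drives T toward zero, exp(delta / T) vanishes for any negative delta as the run progresses, so the search gradually stops accepting downhill moves and settles into hill climbing.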
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def obrien_fleming_cutoff(K, current_k, alpha):\n\n if not isinstance(K, int) or K < 1 or K > 10:\n raise ValueError('K must be an integer between 1 and 10.')\n\n if not isinstance(current_k, int) or current_k < 1 or current_k > 10:\n raise ValueError('current_k must be an integer between 1 and 10.')\n if current_k > K:\n raise ValueError('current_k must be less than k.')\n\n if alpha not in [0.01, 0.05, 0.1]:\n raise ValueError('alpha must be 0.01, 0.05, or 0.1.')\n\n cutoffs = {\n \"0.01\": [2.576, 2.580, 2.595, 2.939, 2.986, 3.023, 3.053, 3.078, 3.099, 3.117],\n \"0.05\": [1.960, 2.178, 2.289, 2.361, 2.413, 2.453, 2.485, 2.512, 2.535, 2.555],\n \"0.1\": [1.645, 1.875, 1.992, 2.067, 2.122, 2.164, 2.197, 2.225, 2.249, 2.270],\n }\n return cutoffs[str(alpha)][K - 1] * math.sqrt(K / current_k)", "def update_kkrimp_params(self):\n\n decrease_mixing_fac = False\n switch_agressive_mixing = False\n switch_higher_accuracy = False\n initial_settings = False\n\n # only do something other than simple mixing after first kkr run\n if self.ctx.loop_count != 1:\n # first determine if previous step was successful (otherwise try to find some rms value and decrease mixing to try again)\n if not self.ctx.kkr_step_success:\n decrease_mixing_fac = True\n message = 'INFO: last KKR calculation failed. Trying decreasing mixfac'\n self.report(message)\n\n convergence_on_track = self.convergence_on_track()\n\n # check if calculation was on its way to converge\n if not convergence_on_track:\n decrease_mixing_fac = True\n message = 'INFO: Last KKR did not converge. Trying decreasing mixfac'\n self.report(message)\n # reset last_remote to last successful calculation\n last_calcs_list = list(range(len(self.ctx.calcs))) # needs to be list to support slicing\n if len(last_calcs_list) > 1:\n last_calcs_list = array(last_calcs_list)[::-1] # make sure to go from latest calculation backwards\n for icalc in last_calcs_list:\n message = f\"INFO: last calc success? {icalc} {self.ctx.KKR_steps_stats['success'][icalc]}\"\n self.report(message)\n if self.ctx.KKR_steps_stats['success'][icalc]:\n if self.ctx.KKR_steps_stats['last_rms'][icalc] < self.ctx.KKR_steps_stats['first_rms'][icalc]:\n self.ctx.last_remote = self.ctx.calcs[icalc].outputs.remote_folder\n break # exit loop if last_remote was found successfully\n else:\n self.ctx.last_remote = None\n else:\n self.ctx.last_remote = None\n # now cover case when last_remote needs to be set to initial remote folder (from input)\n if self.ctx.last_remote is None:\n if 'kkrimp_remote' in self.inputs:\n messager = 'INFO: no successful and converging calculation to take RemoteData from. 
Reuse RemoteData from input instead.'\n self.report(message)\n self.ctx.last_remote = self.inputs.kkrimp_remote\n elif 'impurity_info' in self.inputs or 'remote_data' in self.inputs:\n self.ctx.last_remote = None\n # check if last_remote has finally been set and abort if this is not the case\n if self.ctx.last_remote is None:\n messager = 'ERROR: last remote not found'\n self.report(message)\n return self.exit_codes.ERROR_SETTING_LAST_REMOTE # pylint: disable=no-member\n\n # check if mixing strategy should be changed\n last_mixing_scheme = self.ctx.last_params.get_dict()['IMIX']\n if last_mixing_scheme is None:\n last_mixing_scheme = 0\n\n if convergence_on_track:\n last_rms = self.ctx.last_rms_all[-1]\n if last_rms < self.ctx.threshold_aggressive_mixing and last_mixing_scheme == 0:\n switch_agressive_mixing = True\n message = 'INFO: rms low enough, switch to agressive mixing'\n self.report(message)\n\n # check if switch to higher accuracy should be done\n if not self.ctx.kkr_higher_accuracy:\n if self.ctx.kkr_converged: # or last_rms < self.ctx.threshold_switch_high_accuracy:\n switch_higher_accuracy = True\n\n\n# self.report(\"INFO: rms low enough, switch to higher accuracy settings\")\n else:\n initial_settings = True\n self.ctx.kkr_step_success = True\n\n if self.ctx.loop_count > 1:\n last_rms = self.ctx.last_rms_all[-1]\n\n # extract values from host calculation\n host_GF_calc = self.inputs.remote_data.get_incoming(node_class=CalcJobNode).first().node\n host_GF_outparams = host_GF_calc.outputs.output_parameters.get_dict()\n host_GF_inparams = host_GF_calc.inputs.parameters.get_dict()\n nspin = host_GF_outparams.get('nspin')\n non_spherical = host_GF_inparams.get('INS')\n if non_spherical is None:\n non_spherical = kkrparams.get_KKRcalc_parameter_defaults()[0].get('INS')\n self.ctx.spinorbit = host_GF_outparams.get('use_newsosol')\n\n # if needed update parameters\n if decrease_mixing_fac or switch_agressive_mixing or switch_higher_accuracy or initial_settings or self.ctx.mag_init:\n if initial_settings:\n label = 'initial KKR scf parameters'\n description = 'initial parameter set for scf calculation'\n else:\n label = ''\n description = ''\n\n # step 1: extract info from last input parameters and check consistency\n para_check = kkrparams(params_type='kkrimp')\n para_check.get_all_mandatory()\n message = 'INFO: get kkrimp keywords'\n self.report(message)\n\n # init new_params dict where updated params are collected\n new_params = {}\n\n # step 1.2: check if all mandatory keys are there and add defaults if missing\n missing_list = para_check.get_missing_keys(use_aiida=True)\n if missing_list != []:\n kkrdefaults = kkrparams.get_KKRcalc_parameter_defaults()[0]\n kkrdefaults_updated = []\n for key_default, val_default in list(kkrdefaults.items()):\n if key_default in missing_list:\n new_params[key_default] = kkrdefaults.get(key_default)\n kkrdefaults_updated.append(key_default)\n if len(kkrdefaults_updated) > 0:\n self.report('ERROR: no default param found')\n return self.exit_codes.ERROR_MISSING_PARAMS # pylint: disable=no-member\n else:\n message = f'updated KKR parameter node with default values: {kkrdefaults_updated}'\n self.report(message)\n\n # step 2: change parameter (contained in new_params dictionary)\n last_mixing_scheme = para_check.get_value('IMIX')\n if last_mixing_scheme is None:\n last_mixing_scheme = 0\n\n strmixfac = self.ctx.strmix\n aggrmixfac = self.ctx.aggrmix\n nsteps = self.ctx.nsteps\n\n # TODO: maybe add decrease mixing factor option as in kkr_scf wc\n # step 
2.1 fill new_params dict with values to be updated\n if decrease_mixing_fac:\n if last_mixing_scheme == 0:\n self.report(f'(strmixfax, mixreduce)= ({strmixfac}, {self.ctx.mixreduce})')\n self.report(f'type(strmixfax, mixreduce)= {type(strmixfac)} {type(self.ctx.mixreduce)}')\n strmixfac = strmixfac * self.ctx.mixreduce\n self.ctx.strmix = strmixfac\n label += f'decreased_mix_fac_str (step {self.ctx.loop_count})'\n description += f'decreased STRMIX factor by {self.ctx.mixreduce}'\n else:\n self.report(f'(aggrmixfax, mixreduce)= ({aggrmixfac}, {self.ctx.mixreduce})')\n self.report(f'type(aggrmixfax, mixreduce)= {type(aggrmixfac)} {type(self.ctx.mixreduce)}')\n aggrmixfac = aggrmixfac * self.ctx.mixreduce\n self.ctx.aggrmix = aggrmixfac\n label += 'decreased_mix_fac_bry'\n description += f'decreased AGGRMIX factor by {self.ctx.mixreduce}'\n\n if switch_agressive_mixing:\n last_mixing_scheme = self.ctx.type_aggressive_mixing\n label += ' switched_to_agressive_mixing'\n description += f' switched to agressive mixing scheme (IMIX={last_mixing_scheme})'\n\n # add number of scf steps, spin\n new_params['SCFSTEPS'] = nsteps\n new_params['NSPIN'] = nspin\n new_params['INS'] = non_spherical\n\n # add ldos runoption if dos_run = True\n if self.ctx.dos_run:\n if self.ctx.lmdos:\n runflags = new_params.get('RUNFLAG', []) + ['lmdos']\n else:\n runflags = new_params.get('RUNFLAG', []) + ['ldos']\n new_params['RUNFLAG'] = runflags\n new_params['SCFSTEPS'] = 1\n\n # turn on Jij calculation if jij_run == True\n if self.ctx.jij_run:\n new_params['CALCJIJMAT'] = 1\n\n # add newsosol\n if self.ctx.spinorbit:\n testflags = new_params.get('TESTFLAG', []) + ['tmatnew']\n new_params['TESTFLAG'] = testflags\n new_params['SPINORBIT'] = 1\n new_params['NCOLL'] = 1\n # TODO add deprecation warning and remove these lines (can be set with params_overwrite instead)\n if self.ctx.mesh_params.get('RADIUS_LOGPANELS', None) is not None:\n new_params['RADIUS_LOGPANELS'] = self.ctx.mesh_params['RADIUS_LOGPANELS']\n if self.ctx.mesh_params.get('NCHEB', None) is not None:\n new_params['NCHEB'] = self.ctx.mesh_params['NCHEB']\n if self.ctx.mesh_params.get('NPAN_LOG', None) is not None:\n new_params['NPAN_LOG'] = self.ctx.mesh_params['NPAN_LOG']\n if self.ctx.mesh_params.get('NPAN_EQ', None) is not None:\n new_params['NPAN_EQ'] = self.ctx.mesh_params['NPAN_EQ']\n new_params['CALCORBITALMOMENT'] = 1\n else:\n new_params['SPINORBIT'] = 0\n new_params['NCOLL'] = 0\n new_params['CALCORBITALMOMENT'] = 0\n new_params['TESTFLAG'] = []\n\n # set mixing schemes and factors\n if last_mixing_scheme > 2:\n new_params['ITDBRY'] = self.ctx.broyden_num\n new_params['IMIX'] = last_mixing_scheme\n new_params['MIXFAC'] = aggrmixfac\n new_params['NSIMPLEMIXFIRST'] = self.ctx.nsimplemixfirst\n elif last_mixing_scheme == 0:\n new_params['IMIX'] = last_mixing_scheme\n new_params['MIXFAC'] = strmixfac\n\n # add mixing scheme to context\n self.ctx.last_mixing_scheme = last_mixing_scheme\n\n if switch_higher_accuracy:\n self.ctx.kkr_higher_accuracy = True\n\n # add convergence settings\n if self.ctx.loop_count == 1 or self.ctx.last_mixing_scheme == 0:\n new_params['QBOUND'] = self.ctx.threshold_aggressive_mixing\n else:\n new_params['QBOUND'] = self.ctx.convergence_criterion\n\n # initial magnetization\n if initial_settings and self.ctx.mag_init:\n if self.ctx.hfield[0] <= 0.0 or self.ctx.hfield[1] == 0:\n self.report(\n '\\nWARNING: magnetization initialization chosen but hfield is zero. 
Automatically change back to default value (hfield={})\\n'\n .format(self._wf_default['hfield'])\n )\n self.ctx.hfield = self._wf_default['hfield']\n new_params['HFIELD'] = self.ctx.hfield\n elif self.ctx.mag_init and self.ctx.mag_init_step_success: # turn off initialization after first (successful) iteration\n new_params['HFIELD'] = [0.0, 0]\n elif not self.ctx.mag_init:\n self.report(\"INFO: mag_init is False. Overwrite 'HFIELD' to '0.0' and 'LINIPOL' to 'False'.\")\n # reset mag init to avoid resinitializing\n new_params['HFIELD'] = [0.0, 0]\n\n # set nspin to 2 if mag_init is used\n if self.ctx.mag_init:\n nspin_in = nspin\n if nspin_in is None:\n nspin_in = 1\n if nspin_in < 2:\n self.report('WARNING: found NSPIN=1 but for maginit needs NPIN=2. Overwrite this automatically')\n new_params['NSPIN'] = 2\n message = f'new_params: {new_params}'\n self.report(message)\n\n # overwrite values from additional input node\n if 'params_overwrite' in self.inputs:\n print('use params_overwrite', self.inputs.params_overwrite.get_dict())\n self._overwrite_parameters_from_input(new_params)\n\n # step 2.2 update values\n try:\n for key, val in new_params.items():\n para_check.set_value(key, val, silent=True)\n except:\n message = 'ERROR: failed to set some parameters'\n self.report(message)\n return self.exit_codes.ERROR_PARAMETER_UPDATE # pylint: disable=no-member\n\n # step 3:\n message = f'INFO: update parameters to: {para_check.get_set_values()}'\n self.report(message)\n updatenode = Dict(para_check.get_dict())\n updatenode.label = label\n updatenode.description = description\n paranode_new = updatenode #update_params_wf(self.ctx.last_params, updatenode)\n self.ctx.last_params = paranode_new\n else:\n message = 'INFO: reuse old settings'\n self.report(message)\n\n message = 'INFO: done updating kkr param step'\n self.report(message)", "def check_solution(self, potential_solution):\n\n old_k = self.K\n new_k = potential_solution.set_K(self.len_connections)\n\n # accept the new state when K is higher\n if new_k > old_k:\n self.state = potential_solution\n self.K = new_k", "def ekf(z_k_observation_vector, state_estimate_k_minus_1, control_vector_k_minus_1, P_k_minus_1, dk):\n ######################### Predict #############################\n # Predict the state estimate at time k based on the state\n # estimate at time k-1 and the control input applied at time k-1.\n state_estimate_k = A_k_minus_1 @ (state_estimate_k_minus_1) + (getB(state_estimate_k_minus_1[2],dk)) @ (control_vector_k_minus_1) + (process_noise_v_k_minus_1)\n print(f'State Estimate Before EKF={state_estimate_k}\\r\\n')\n \n # Predict the state covariance estimate based on the previous\n # covariance and some noise\n P_k = A_k_minus_1 @ P_k_minus_1 @ A_k_minus_1.T + (Q_k)\n \n ################### Update (Correct) ##########################\n # Calculate the difference between the actual sensor measurements\n # at time k minus what the measurement model predicted \n # the sensor measurements would be for the current timestep k.\n measurement_residual_y_k = z_k_observation_vector - (\n (H_k @ state_estimate_k) + (\n sensor_noise_w_k))\n \n print(f'Observation={z_k_observation_vector}\\r\\n')\n \n # Calculate the measurement residual covariance\n S_k = H_k @ P_k @ H_k.T + R_k\n \n # Calculate the near-optimal Kalman gain\n # We use pseudoinverse since some of the matrices might be\n # non-square or singular.\n K_k = P_k @ H_k.T @ np.linalg.pinv(S_k)\n \n # Calculate an updated state estimate for time k\n state_estimate_k = state_estimate_k 
+ (K_k @ measurement_residual_y_k)\n \n # Update the state covariance estimate for time k\n P_k = P_k - (K_k @ H_k @ P_k)\n \n # Print the best (near-optimal) estimate of the current state of the robot\n print(f'State Estimate After EKF={state_estimate_k}\\r\\n')\n \n # Return the updated state and covariance estimates\n return state_estimate_k, P_k", "def delta_kpoints(self):\n self.get_kpoints('no')\n ori_kp = self.kps\n omk = min(ori_kp)\n nmk = omk+self.diff\n self.kps = [v*nmk/omk for v in ori_kp]\n self.write_output()", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def find_k(model, data, tol = 1e-3):\n\n # Store the quantities that never change\n pow_mod = 10 ** model\n\n # Make the array x_k 10 times finer than the tolerance\n step = tol * 0.1\n k_arr = np.arange(0, 1 + step, step)\n\n # Get the x_k array\n x_k = np.array([(1 - k_arr) + k_arr * p for p in pow_mod]).T\n\n # And now the really useful arrays\n coef = (pow_mod - 1)/x_k * 2/np.log(10)\n log_x_k = np.log10(x_k)\n\n # Start with the extremes\n k0 = k_arr[0:1]\n k1 = k_arr[-1:]\n km = (k0 + k1) * 0.5\n\n first = True\n while True:\n\n # Each of the gradients\n gradm = get_one_gradient(k_arr, coef, log_x_k, data, km)\n if first:\n # If first time, we have to calculate everything\n grad0 = get_one_gradient(k_arr, coef, log_x_k, data, k0)\n grad1 = get_one_gradient(k_arr, coef, log_x_k, data, k1)\n\n first = False\n else:\n # Otherwise we can use the previous calculation\n grad0 = (sum0 * gradm + dif0 * grad0) * 0.5\n grad1 = (sum1 * gradm + dif1 * grad1) * 0.5\n\n # Get signs\n sign0 = np.sign(grad0)\n signm = np.sign(gradm)\n sign1 = np.sign(grad1)\n\n # Get differences\n sum0 = np.abs(signm + sign0)\n dif0 = np.abs(signm - sign0)\n sum1 = np.abs(signm + sign1)\n dif1 = np.abs(signm - sign1)\n\n # Vectorized change\n k0 = (sum0 * km + dif0 * k0) * 0.5\n k1 = (sum1 * km + dif1 * k1) * 0.5\n\n # Get the middle point\n new_km = (k0 + k1) * 0.5\n\n # Check if converged\n dif = np.abs(new_km[0] - km[0])\n if dif < tol:\n return new_km\n\n # Update\n km = new_km", "def KL_calc(self, Rs, q_mu):\n k_inv_mu = kron_mvp(self.K_invs, self.mu - q_mu)\n mu_penalty = np.sum(np.multiply(self.mu - q_mu, k_inv_mu))\n det_S = self.log_det_S(Rs)\n trace_term = self.calc_trace_term(Rs)[0]\n kl = 0.5 * (self.det_K - self.n - det_S +\n trace_term + mu_penalty)\n return max(0, kl)", "def rk8(accel,m,r,h,v): \n k1v = accel(m,r)\n k1r = v\n k2v = accel(m,r + 0.25*k1r*h)\n k2r = v + (0.25*k1v)*h\n k3v = accel(m,r + (5/72.*k1r + 1/72.*k2r)*h)\n k3r = v + (5/72.*k1v + 1/72.*k2v)*h\n k4v = accel(m,r + (1/32.*k1r +3/32.*k3r)*h)\n k4r = v + (1/32.*k1v +3/32.*k3v)*h\n k5v = accel(m,r + (106/125.*k1r- 408/125.*k3r + 352/125.*k4r)*h)\n k5r = v + (106/125.*k1v- 408/125.*k3v + 352/125.*k4v)*h\n k6v = accel(m,r + (1/48.*k1r+ 8/33.*k4r - 125/528.*k5r)*h)\n k6r = v + (1/48.*k1v+ 8/33.*k4v - 125/528.*k5v)*h\n k7v = accel(m,r + (-13893*k1r+ 39936*k4r -64125*k5r+ 60720*k6r)*h/26411.)\n k7r = v +(-13893*k1v+ 39936*k4v -64125*k5v+ 60720*k6v)*h/26411.\n k8v = accel(m,r + (37/392.*k1r+ 1625/9408.*k5r -2/15.*k6r+ 61/6720*k7r)*h)\n k8r = v + (37/392.*k1v+ 1625/9408.*k5v -2/15.*k6v+ 61/6720*k7v)*h\n k9v = accel(m,r +(17176/25515.*k1r - 47104/25515.*k4r + 1325/504.*k5r - 
41792/25515.*k6r + 20237/145800.*k7r + 4312/6075.*k8r)*h)\n k9r = v + (17176/25515.*k1v - 47104/25515.*k4v + 1325/504.*k5v - 41792/25515.*k6v + 20237/145800.*k7v + 4312/6075.*k8v)*h\n k10v = accel(m,r + ( -23834/180075.*k1r - 77824/1980825.*k4r- 636635/633864.*k5r + 254048/300125.*k6r - 183/7000.*k7r + 8/11.*k8r - 324/3773.*k9r)*h)\n k10r = v + ( -23834/180075.*k1v - 77824/1980825.*k4v- 636635/633864.*k5v + 254048/300125.*k6v - 183/7000.*k7v + 8/11.*k8v - 324/3773.*k9v)*h\n k11v= accel(m,r + (12733/7600.*k1r - 20032/5225.*k4r + 456485/80256.*k5r - 42599/7125.*k6r + 339227/912000.*k7r - 1029/4108.*k8r + 1701/1408.*k9r + 5145/2432.*k10r)*h)\n k11r = v + (12733/7600.*k1v - 20032/5225.*k4v + 456485/80256.*k5v - 42599/7125.*k6v + 339227/912000.*k7v - 1029/4108.*k8v + 1701/1408.*k9v + 5145/2432.*k10v)*h\n k12v = accel(m,r + h*(-27061/204120.*k1r + 40448/280665.*k4r -1353775/1197504.*k5r + 17662/25515.*k6r - 71687/1166400.*k7r + 98/225.*k8r + 1/16.*k9r + 3773/11664.*k10r))\n k12r = v + h*(-27061/204120.*k1v + 40448/280665.*k4v -1353775/1197504.*k5v + 17662/25515.*k6v - 71687/1166400.*k7v + 98/225.*k8v + 1/16.*k9v + 3773/11664.*k10v)\n k13v = accel(m,r + h*(11203/8680.*k1r - 38144/11935.*k4r + 2354425/458304.*k5r - 84046/16275.*k6r + 673309/1636800.*k7r + 4704/8525.*k8r + 9477/10912.*k9r - 1029/992.*k10r + 19/341.*k12r))\n k13r = v + h*(11203/8680.*k1v - 38144/11935.*k4v + 2354425/458304.*k5v - 84046/16275.*k6v + 673309/1636800.*k7v + 4704/8525.*k8v + 9477/10912.*k9v - 1029/992.*k10v + 19/341.*k12v)\n\n\n new_v8 = v + h*(13/288.*k1v +32/125.*k6v + 31213/144000.*k7v + 2401/12375.*k8v + 1701/14080.*k9v + 2401/19200.*k10v + 19/450.*k11v) \n new_r8 = r + h*(13/288.*k1r +32/125.*k6r + 31213/144000.*k7r + 2401/12375.*k8r + 1701/14080.*k9r + 2401/19200.*k10r + 19/450.*k11r) \n \n return new_v8,new_r8", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))", "def _K_compute_eq(self):\r\n t_eq = self._t[self._index==0]\r\n if self._t2 is None:\r\n if t_eq.size==0:\r\n self._K_eq = np.zeros((0, 0))\r\n return\r\n self._dist2 = np.square(t_eq[:, None] - t_eq[None, :])\r\n else:\r\n t2_eq = self._t2[self._index2==0]\r\n if t_eq.size==0 or t2_eq.size==0:\r\n self._K_eq = np.zeros((t_eq.size, t2_eq.size))\r\n return\r\n self._dist2 = np.square(t_eq[:, None] - t2_eq[None, :])\r\n \r\n self._K_eq = np.exp(-self._dist2/(2*self.lengthscale*self.lengthscale))\r\n if self.is_normalized:\r\n self._K_eq/=(np.sqrt(2*np.pi)*self.lengthscale)", "def get_best_k_cv(air_quality_model):\n\n locations = air_quality_model.air_quality_locations\n time_series = air_quality_model.air_quality_time_series\n\n for each_location in locations:\n\n other_locations = [i for i in locations if i != each_location]\n training_time_series = time_series[other_locations]\n scaled_training_time_series = air_quality_model.scaler.transform(training_time_series)\n training_time_series_dropna = scaled_training_time_series.dropna().T\n\n # k means determine k\n distortions = []\n K = range(1, len(other_locations) + 1, 1)\n for k in K:\n kmeans = KMeans(n_clusters=k, max_iter=300).fit(training_time_series_dropna)\n # err = sum(np.min(cdist(training_time_series_dropna, kmeans.cluster_centers_, 'euclidean'), axis=1)) \\\n # / training_time_series_dropna.shape[0]\n\n # Sum of squared distances of samples to their 
closest cluster center\n err = kmeans.inertia_\n distortions.append(err)\n print(k, dict(zip(other_locations, kmeans.labels_)))\n print(each_location, k, 'err=', err)\n\n # Plot the elbow\n plt.figure(figsize=(15, 20))\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title(str(each_location) + ' The Elbow Method showing the optimal k')\n plt.show()", "def get_keff(self, take_final=False):\n \n # find max in rec (or final value, don't force as default)\n if take_final:\n _rec_max = self.rec_curve[-1, 1]\n else:\n _rec_max = numpy.max(self.rec_curve[:, 1])\n \n _rec_min = self.rec_curve[0, 1] \n #Bit of a cheat - take the first point. Will be wrong in the case of \n #very fast recovery compared to 1st interval. But in this case, _rec_min and _rec_max \n #should be similar and caught below\n \n if _rec_min > 0.95 * _rec_max:\n print (\"No recovery because too little desensitization (fast limit)\")\n print (\"Setting k_eff = 1000\")\n self.k_eff = 1000 #We could certainly not measure a rate this fast\n \n else:\n _half_rec_amp = _rec_max - 0.5 * (_rec_max - _rec_min)\n _near_idx = (numpy.abs(self.rec_curve[:, 1] - _half_rec_amp)).argmin()\n _near_value = self.rec_curve [_near_idx, 1]\n\n #interpolate\n #must be a smarter way to combine the two possibilities?\n if _near_value > _half_rec_amp:\n #true half time was before our nearest neighbor\n _left = self.rec_curve[_near_idx - 1, 1]\n _right = self.rec_curve[_near_idx, 1]\n _tl = self.rec_curve[_near_idx - 1, 0]\n _tr = self.rec_curve[_near_idx, 0]\n #inverse of time difference scaled by normalized (point-threshold distance)\n self.k_eff = 1 / (_tr - (_tr - _tl) * float(_right - _half_rec_amp)/(_right - _left))\n\n elif _near_value < _half_rec_amp:\n #true half time was after our nearest neighbor\n _left = self.rec_curve[_near_idx, 1]\n _right = self.rec_curve[_near_idx + 1, 1]\n _tl = self.rec_curve[_near_idx, 0]\n _tr = self.rec_curve[_near_idx + 1, 0]\n #as above rearranged to approach from below.\n self.k_eff = 1 / (_tl + (_tr - _tl) * float(_half_rec_amp - _left)/(_right - _left))\n\n elif _near_value == _half_rec_amp:\n\n self.k_eff = 1 / self.rec_curve[near_hi_idx, 0]", "def k_Li86(wind_ms, temp_C):\n from numpy import zeros_like\n\n U = wind_ms\n T = temp_C\n\n Sc = schmidt_number(T)\n k = zeros_like(temp_C)\n\n i1 = U <= 3.6\n i2 = (U > 3.6) & (U < 13.0)\n i3 = U >= 13.0\n\n k[i1] = (0.17 * U[i1]) * (Sc[i1] / 600) ** (-2.0 / 3.0)\n k[i2] = ((U[i2] - 3.4) * 2.8) * (600 / Sc[i2]) ** 0.5\n k[i3] = ((U[i3] - 8.4) * 5.9) * (600 / Sc[i3]) ** 0.5\n\n return k", "def get_k_gain(self, A, B, Q, R, output = 0):\n\n # solve the Ricatti equation\n X = matrix(scipy.linalg.solve_continuous_are(A, B, Q, R))\n\n # compute the LQR gain\n K = matrix((R)*(B.T*X))\n\n if output:\n print(\"X-axis K Matrix Gains: \")\n print(K[0])\n print(\"Y-axis K Matrix Gains: \")\n print(K[1])\n\n return K", "def gather_krec(self):\n \n #getting Qlo and Qhi the wrong way round here gives perfect flat s.s. 
recovery\n rec1 = Recovery_Qmade(self.m1.param, self.m1.N_states, \n self.m1.open_states, self.m1.Qlo, self.m1.Qhi, self.m1.P_init_hi, t_range=1e4)\n\n rec1.build_curve()\n rec1.get_keff()\n \n rec2 = Recovery_Qmade(self.m2.param, self.m2.N_states, \n self.m2.open_states, self.m2.Qlo, self.m2.Qhi, self.m2.P_init_hi, t_range=1e4)\n\n rec2.build_curve()\n rec2.get_keff()\n \n self.k_eff1 = rec1.k_eff\n self.k_eff2 = rec2.k_eff\n \n self.mean_keff = (rec1.k_eff + rec2.k_eff ) / 2", "def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n self.assertFloatEqual((D, Pval), (0.46, 3.801e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n self.assertFloatEqual((D, Pval), (0.46, 5.084e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n self.assertFloatEqual((D,Pval), (0.48, 0.001772), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n self.assertFloatEqual((D,Pval), (6.9388939039072284e-18, 1.), eps=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)", "def kge(self, return_all=False):\n cc = np.corrcoef(self.true, self.predicted)[0, 1]\n alpha = np.std(self.predicted) / np.std(self.true)\n beta = np.sum(self.predicted) / np.sum(self.true)\n return post_process_kge(cc, alpha, beta, return_all)", "def calc_K(tau, delta_t, var_n):\n var_f = 1. - var_n\n rval = var_f * np.exp(-(delta_t)**2 / (2. 
* tau**2))\n if delta_t == 0:\n rval += var_n\n return rval", "def improved_initialization(X, k):\n new_values = X.copy()\n best_like = float('-inf')\n MU, SIGMA, PI = None, None, None\n for _ in range(10):\n initial_means = get_initial_means(new_values, k)\n pi = np.full(k, 1 / k)\n while True:\n mu, clusters = k_means_step(new_values, k, initial_means)\n diff = np.sum(mu - initial_means)\n if not diff:\n sigma = compute_sigma(X, mu)\n break\n initial_means = mu\n mu, sigma, pi, res = train_model(X, k, default_convergence, (mu, sigma, pi))\n lk = likelihood(X, pi, mu, sigma, k)\n if lk > best_like:\n best_like = lk\n MU = mu\n SIGMA = sigma\n PI = pi\n return MU, SIGMA, PI", "def compute_kalman_gain(jacobian, oldCovariance, measurementCovariance):\n\n return None", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def kappas(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = a + b + c + d\n\n if a == n or d == n:\n k0, k1 = np.nan, np.nan\n elif b == n:\n k0, k1 = np.NINF, -0.0\n elif c == n:\n k0, k1 = -0.0, np.NINF\n elif p1 == n or q2 == n:\n k0, k1 = np.nan, 0.0\n elif p2 == n or q1 == n:\n k0, k1 = 0.0, np.nan\n else:\n cov = self.covar()\n p2_q1, p1_q2 = p2 * q1, p1 * q2\n k0, k1 = _div(cov, p2_q1), _div(cov, p1_q2)\n\n return k0, k1, self.kappa()", "def metropolis_accept(self, old_protein, new_protein, T):\n delta_E = self.compute_energy(new_protein) - self.compute_energy(old_protein)\n \n if delta_E <= 0: \n p = 1\n else: \n p = math.e**(-delta_E/T)\n return(p)", "def KPMO(XVal,YVal_State_1,YVal_State_2,YVal_State_3,XVal_Mean_Trans_1,XVal_Mean_Trans_2,XVal_Sig_Trans_1,XVal_Sig_Trans_2,iOpt):\n#\t1. 
Computations:\n\tTiny=1E-20\n\tP_Trans_1 = fCPD(XVal,XVal_Mean_Trans_1, XVal_Sig_Trans_1) # Transition of kerogen from State #1 to State #2\n\tP_Trans_2 = fCPD(XVal,XVal_Mean_Trans_2, XVal_Sig_Trans_2) # Transition of kerogen from State #2 to State #3\n\tFunVal=0\n\tif(iOpt==0):\n\t\tP_State_1=(1-P_Trans_1)*(1-P_Trans_2)\n\t\tP_State_2=P_Trans_1*(1 - P_Trans_2)\n\t\tP_State_3=1-P_State_1-P_State_2\n\t\tFunVal=(YVal_State_1*P_State_1)+(YVal_State_2*P_State_2)+(YVal_State_3*P_State_3)\n\tif(iOpt==1):\n\t\tFunVal=YVal_State_1+P_Trans_1*YVal_State_2+P_Trans_2*YVal_State_3\n\tif(FunVal==0):\n\t\tFunVal=Tiny\n\treturn FunVal", "def EstimateKFTimeStep(u1,y1,z0,Xxd,Xud,Yx,Yu,P0,Q,R):\n \n # estimate next step\n z1m = Xxd.dot(z0) + Xud.dot(u1)\n y1hat = Yx.dot(z1m) + Yu.dot(u1)\n P1m = (Xxd.dot(P0)).dot(Xxd.T) + Q\n \n # Calculate Kalman gain\n # same as Lk from [1] - And their Rtilde_k is G*P1m*G'+R\n Kk = np.dot(P1m,Yx.T).dot( np.linalg.inv(((Yx.dot(P1m)).dot(Yx.T) + R))) \n # update estimate with measurement\n z1 = z1m + Kk.dot(y1 - y1hat)\n \n P1 = (np.eye(Xxd.shape[0]) - Kk.dot(Yx) ).dot(P1m)\n return z1,P1,Kk", "def epsilon_fit_Chang(l_onde,vl1,vl2,vt1,vt2,gl1,gl2,gt1,gt2,f_l1,f_l2,f_t1,f_t2,epsinf1,epsinf2):\n # Chang PRB38 12369\n v = 1e4/l_onde\n \n epsx = (epsinf1+epsinf2)/2 - (f_t1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) - (f_t2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2)\n epsz = 1/(((1/2)*(1/epsinf1 + 1/epsinf2)) + (f_l1*(vl1**2 - vt1**2))/(-vl1**2 + v**2 + 1j*v*gl1) + (f_l2*(vl2**2 - vt2**2))/(-vl2**2 + v**2 + 1j*v*gl2))\n \n# eps1 = epsinf1*(1 - (f_t1*(vl1**2 - vt1**2))/(vt1**2 - v**2 - 1j*v*gt1))\n# eps2 = epsinf2*(1 - (f_t2*(vl2**2 - vt2**2))/(vt2**2 - v**2 - 1j*v*gt2))\n# epsx = (1/2)*(eps1+eps2)\n# epsz = 1/((1/2)*(1/eps1 + 1/eps2))\n \n return (epsx.real + 1j*np.abs(epsx.imag)),(epsz.real + 1j*np.abs(epsz.imag))", "def epsilon_fit_Chang_homemade(l_onde,vl1,vl2,vt1,vt2,gl1,gl2,gt1,gt2,f_t1,f_t2,f_l1,f_l2,epsinf1,epsinf2):\n # Chang PRB38 12369\n v = 1e4/l_onde\n \n epsx = (epsinf1+epsinf2)/2 - (f_l1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) - (f_l2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2)\n epsz = 1/(1/((epsinf1+epsinf2)/2) + (f_l1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) + (f_l2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2))\n #epsx = (1/2)*(eps1+eps2)\n #epsz = 1/((1/2)*(1/eps1 + 1/eps2))\n \n #epsx = (1/2)*epsinf1*(1-(f_t1*(v**2 - vl1**2 + 1j*v*gl1)/(v**2 - vt1**2 + 1j*v*gt1))-\\\n # (f_t2*(v**2 - vl2**2 + 1j*v*gl2)/(v**2 - vt2**2 + 1j*v*gt2)))\n #epsz = 1/((1/2)*(1/epsinf2)*(1+(f_l1*(v**2 - vt1**2 +1j*v*gl1)/(v**2 - vl1**2 +1j*v*gl1))+\\\n # (f_l2*(v**2 - vt2**2 +1j*v*gl2)/(v**2 - vl2**2 +1j*v*gl2))))\n return (epsx.real + 1j*np.abs(epsx.imag)),(epsz.real + 1j*np.abs(epsz.imag))", "def test_k_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, 
-4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n ki = k_index(pressure, temperature, dewpoint)\n assert_almost_equal(ki, 33.5 * units.degC, 2)", "def converged(old, new):\n # https://github.com/amirgholami/PyHessian/commit/0f7e0f63a0f132998608013351ba19955fc9d861#diff-ba06409ffbc677fe556485172e62649fe7a069631390f5a780766bff3289b06bR149-R150 # noqa: B950\n return (old - new).abs() / (old.abs() + 1e-6) < tol", "def KRC(self, ik, ipd, ipl, t):\n idx = ik - 1\n\n den1 = 1 - self.delta[idx] * self.coca.PK(ik, t)\n num1 = self.delta[idx] * self.thetak[idx]\n ins = num1 / den1\n\n for l in np.arange(0, self.L):\n pl = self.coca.PL(l, t)\n ins += ((self.thetal[l] * self.gamma[l][idx]) / (1 - pl))\n\n ans = ipd * np.exp(t * ipl) * ins\n\n return ans", "def jansky_to_kelvin(self):\n this_unit = self.stokes.unit\n if self.component_type == \"point\":\n if this_unit.is_equivalent(\"K sr\"):\n return\n\n else:\n if this_unit.is_equivalent(\"K\"):\n return\n\n if self.spectral_type == \"spectral_index\" or (\n self.spectral_type == \"flat\" and self.reference_frequency is not None\n ):\n conv_factor = skyutils.jy_to_ksr(self.reference_frequency)\n conv_factor = np.repeat(\n np.repeat(conv_factor[np.newaxis, np.newaxis, :], 4, axis=0),\n self.Nfreqs,\n axis=1,\n )\n elif self.freq_array is not None:\n conv_factor = skyutils.jy_to_ksr(self.freq_array)\n conv_factor = np.repeat(\n np.repeat(conv_factor[np.newaxis, :, np.newaxis], 4, axis=0),\n self.Ncomponents,\n axis=2,\n )\n else:\n raise ValueError(\n \"Either reference_frequency or freq_array must be set to convert to K.\"\n )\n\n self.stokes = self.stokes * conv_factor\n if self.stokes_error is not None:\n self.stokes_error = self.stokes_error * conv_factor\n\n if self.frame_coherency is not None:\n self.calc_frame_coherency()", "def test_calc_k_c():\n\n P_x0 = ufloat(1.75789868673e-12, 1.75789868673e-14) * u.nm**2/u.Hz # 1/100\n f_c = ufloat(50000, 0.5) * u.Hz # 1/100000 relative\n Q = ufloat(10000, 100) * u.dimensionless # 1/100\n T = ufloat(300, 3) * u.K # 1/100\n # ex_k_c is no longer a nice number because I switched from a rounded to\n # more exact value for Boltzmann's constant\n ex_k_c = ufloat(2.9999965233852217, 0.05196147267057527) * u.N/u.m\n k_c = calc_k_c(f_c, Q, P_x0, T)\n assert_almost_equal(k_c.magnitude.n, ex_k_c.magnitude.n)\n assert_almost_equal(k_c.magnitude.s, ex_k_c.magnitude.s)", "def advance(self):\n\n f, n, rtol, atol, neq = \\\n self.f, self.n, self.rtol, self.atol, self.neq\n u_n, t_n, t_next = self.u[n], self.t[n], self.t[n+1]\n dt = t_next - t_n\n\n first_step = dt # try one big step to next desired level\n\n def middle(x,y,z): # Auxilary function\n return sorted([x,y,z])[1]\n\n # Extract coefficients from Butcher-tableau\n table = self._butcher_tableau\n k_len = table.shape[1] - 1 # number of internal stages\n\n # coefficients for internal stages\n factors_u = np.asarray(table[:k_len, 1:])\n # coefficients for t\n factors_t = table[:k_len, 0]\n # coefficients for u_new\n factors_u_new = table[k_len, 1:]\n\n # coefficients for local error between 2 levels\n factors_error = table[k_len+1, 1:] - factors_u_new\n\n u_intermediate = [u_n,]\n t_intermediate = [t_n,]\n u, t, h = u_n, t_n, first_step # initial values\n k = np.zeros((k_len, self.neq), self.dtype) # intern stages\n\n if self.verbose > 0:\n print 'advance solution in [%s, %s], h=%g' % (t_n, t_next, h)\n\n # Loop until next time point is reached\n while (abs(t - t_n) < abs(t_next - t_n)):\n u, t = 
u_intermediate[-1], t_intermediate[-1]\n\n # Internal steps\n k[:, :] = 0. # initialization for next step\n for m in range(k_len):\n k_factors = (np.dot(factors_u, k))[m]\n #print u, u+h*k_factors, f(u+h*k_factor, 0.5), self.dtype\n k[m] = f(u+h*k_factors, t+h*factors_t[m])\n u_new = u + h*(np.dot(factors_u_new, k))\n\n self.info['rejected'] += 1 # reduced below if accepted\n if self.verbose > 0:\n print ' u(t=%g)=%g: ' % (t+h, u_new),\n\n # local error between 2 levels\n error = h*np.abs(np.dot(factors_error, k))\n # Acceptable error tolerance\n tol = rtol*np.abs(u_new) + atol\n\n accurate = (error <= tol).all()\n\n if accurate or h <= self.min_step or h >= self.max_step:\n # Accurate enough,\n # or the step size exceeds valid range,\n # must accept this solution\n u_intermediate.append(u_new)\n t_intermediate.append(t+h)\n if not self.disk_storage:\n self.u_all.append(u_new)\n self.t_all.append(t+h)\n self.info['rejected'] -= 1\n\n if self.verbose > 0:\n print 'accepted, ',\n else:\n if self.verbose > 0:\n print 'rejected, ',\n\n if self.verbose > 0:\n print 'err=%s, ' % str(error),\n if hasattr(self, 'u_exact') and callable(self.u_exact):\n print 'exact-err=%s, ' % \\\n (np.asarray(self.u_exact(t+h))-u_new),\n if h <= self.min_step:\n print 'h=min_step!! ',\n\n\n # Replace 0 values by 1e-16 since we will divide by error\n error = np.asarray([(1e-16 if x == 0. else x) \\\n for x in error])\n\n # Normarized error rate\n rms = error/tol\n rms_norm = np.sqrt(np.sum(rms*rms)/self.neq)\n\n order = float(self._method_order[0])\n # factor to adjust the size of next step\n # Formula is from <Numerical Methods for Engineers,\n # Chappra & Cannle>\n s = .8 *((1./rms_norm)**(1/order))\n # scalar should be in range(0.1, 4.)\n # for better accuracy and smoothness\n s = middle(s, 0.1, 4.0)\n h *= s\n\n # step size should be in range [min_step, max_step]\n h = middle(h, self.min_step, self.max_step)\n # adjust h to fit the last step\n h = min(h, t_next - t_intermediate[-1])\n\n if self.verbose > 0:\n print 'new h=%g' % h\n\n if h == 0:\n break\n\n return u_new", "def k_Sw07(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.27 * U2) * (660 / Sc) ** 0.5\n\n return k", "def check_solution(self, new_protein):\n new_score = new_protein.getscore()\n old_value = self.best_value\n\n if new_score >= old_value:\n self.best_solution = new_protein\n self.best_value = new_score", "def _adjust_k(scheme, k, ef=EF):\n scheme = rx.core._check_scheme(scheme) # noqa: SLF001\n is_half_equilibrium = np.asarray(scheme.is_half_equilibrium)\n k = np.asarray(k, dtype=np.float64).copy()\n\n if np.any(is_half_equilibrium):\n # at least one equilibrium\n if np.any(~is_half_equilibrium):\n # at least one true reaction\n\n k_slowest_equil = k[is_half_equilibrium].min()\n k_fastest_react = k[~is_half_equilibrium].max()\n adjustment = ef * (k_fastest_react / k_slowest_equil)\n\n k[is_half_equilibrium] *= adjustment\n logger.warning(f\"equilibria adjustment = {adjustment}\") # noqa: G004\n\n k_slowest_equil = k[is_half_equilibrium].min()\n k_fastest_react = k[~is_half_equilibrium].max()\n logger.warning(\n f\"slow eq. / fast r. 
= {k_slowest_equil / k_fastest_react}\", # noqa: E501, G004\n )\n else:\n # only equilibria\n\n # set the smallest one to be equal to one\n k = k / k.min()\n\n return jnp.asarray(k)", "def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)", "def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n assert_allclose((D, Pval), (0.46, 3.801e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n assert_allclose((D, Pval), (0.46, 5.084e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n assert_allclose((D, Pval), (0.53, 0.0003576), rtol=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n assert_allclose((D, Pval), (0.53, 0.0003576), rtol=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n assert_allclose((D, Pval), (0.48, 0.001772), rtol=1e-3)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n assert_allclose((D, Pval), (6.9388939039072284e-18, 1.0), rtol=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)", "def adjust_k(epoch, initial_k, increase_k, max_violation=False):\n if max_violation:\n return 1.\n\n return min(initial_k + (increase_k * epoch), 1.)", "def kl(self, old_dist_info, new_dist_info):\n old_prob = old_dist_info[\"prob\"]\n new_prob = new_dist_info[\"prob\"]\n return np.sum(\n old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),\n axis=2\n )", "def kf_algorithm(self, u, y):\n # For the linear filter, x_estimated_state is the difference to the operating point\n 
cov_matrix_before = self.cov_matrix\n # 0. Calculate difference to operating point\n u = u - self.u_op\n if self.model_type == ModelType.EASY:\n x_est_before = self.x_estimated_state - self.operating_point[0:6].reshape((6, 1))\n # x_est_before.reshape((6, 1))\n else:\n x_est_before = self.x_estimated_state - self.operating_point.reshape((8, 1))\n # x_est_before.reshape((8, 1))\n if self.nOutputs == 3:\n y = y - self.operating_point[0:3].reshape(3, 1)\n elif self.nOutputs == 5:\n y = y - np.concatenate((self.operating_point[0:3], self.operating_point[6:8])).reshape(5, 1)\n # x_est_before = self.x_estimated_state - self.operating_point\n # 1. Prediction\n # predict the state by using the linearized system at the fixed operating point\n v_s = u[0][0] + u[1][0]\n v_d = u[0][0] - u[1][0]\n x_est_predict = self.Ak @ x_est_before + self.Bk @ u\n # predict the new covariance\n cov_matrix_predict = (self.Ak @ cov_matrix_before @ np.transpose(self.Ak)\n + self.Bk @ self.N @ np.transpose(self.Bk))\n # 2. Update\n # compute kalman gain\n Kl = (cov_matrix_predict @ np.transpose(self.Ck) @\n np.linalg.inv(self.Ck @ cov_matrix_predict @ np.transpose(self.Ck) + self.W))\n # update state\n if self.nOutputs == 3:\n y_est = x_est_predict[0:3,]\n elif self.nOutputs == 5:\n y_est = np.concatenate((x_est_predict[0:3], x_est_predict[6:8]))\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ self.Ck) @ cov_matrix_predict\n # add again the operating point\n if self.model_type == ModelType.EASY:\n x_estimated_state = x_est_update + self.operating_point[0:6].reshape((6, 1))\n # self.x_estimated_state = x_estimated_state.reshape((1, 6))[0]\n else:\n x_estimated_state = x_est_update + self.operating_point.reshape((8, 1))\n # self.x_estimated_state = x_estimated_state.reshape((1, 8))[0]\n\n if self.should_check_limits:\n # check if the update step state needs to be changed because of limit crossing\n # if that is the case, correct the state and set the state of the simulation accordingly\n corrected_state = self.heliSim.get_limited_state_and_change_state_without_sim(np.transpose(x_estimated_state)[0],\n self.model_type)\n x_estimated_state = np.resize(corrected_state, (self.nStateVars, 1))\n self.x_estimated_state = x_estimated_state\n self.cov_matrix = cov_matrix_update\n # print(\"------\")\n # print(cov_matrix_predict)\n return x_estimated_state", "def K(self, X, Xstar):\n r = l2norm_(X, Xstar)\n bessel = kv(self.v, np.sqrt(2 * self.v) * r / self.l)\n f = 2 ** (1 - self.v) / gamma(self.v) * (np.sqrt(2 * self.v) * r / self.l) ** self.v\n res = f * bessel\n res[np.isnan(res)] = 1\n res = self.sigmaf * res + self.sigman * kronDelta(X, Xstar)\n return (res)", "def test_klauder(self):\n ideal = np.array([0.14899879, -0.16633309, -0.42806931, 0.16605633,\n 0.70769336, 0.16605633, -0.42806931, -0.16633309])\n actual = misc.klauder(8)\n np.testing.assert_allclose(ideal, actual, atol=1e-8, rtol=1e-8)", "def get_old_solution(self, x, k=None, get_ctrl_traj=False):\n if self.n_fail > self.n_safe:\n warnings.warn(\n \"There are no previous solution to be applied. 
Returning None\")\n return None\n if k is None:\n k = self.n_fail\n\n if k < 1:\n warnings.warn(\"Have to shift at least one timestep back\")\n return None\n\n k_fb_old = self.k_fb_safe_all[k - 1]\n k_ff = self.k_ff_safe[k - 1, :, None]\n p_safe = self.p_safe[k - 1, :, None]\n\n u_apply = feedback_ctrl(x, k_ff, k_fb_old, p_safe)\n if get_ctrl_traj:\n k_fb_safe_traj = None\n k_ff_safe_traj = u_apply\n p_ctrl_safe_traj = None\n\n if k < self.n_safe:\n k_fb_safe_traj = self.k_fb_safe_all[k:, :]\n # in accordance to the structure current ctrl u_apply is part of the k_ff ctrl trajectory\n k_ff_safe_traj = np.vstack((u_apply, self.k_ff_safe[k + 1:, :]))\n p_ctrl_safe_traj = self.p_safe[k:, :]\n\n return u_apply, k_fb_safe_traj, \\\n k_ff_safe_traj, p_ctrl_safe_traj\n\n return u_apply", "def diff_k_p1(p1, p2, t=0.):\n return p1", "def _calc_k_gain(self, cov_m, h_tilde, R_cov):\n B = np.matmul(cov_m, np.transpose(h_tilde))\n G = np.matmul(h_tilde, np.matmul(cov_m,\n np.transpose(h_tilde)))\n T = np.linalg.inv(np.add(G, R_cov))\n\n return np.matmul(B, T)", "def calc_ST_operation(m_exhaust_GT_kgpers, T_exhaust_GT_K, T_sup_K, fuel_type):\n\n # calaulate High Pressure (HP) and Low Pressure (LP) mass flow of a double pressure steam turbine\n temp_i_K = (0.9 * ((6 / 48.2) ** (0.4 / 1.4) - 1) + 1) * (T_exhaust_GT_K - ST_DELTA_T)\n if fuel_type == 'NG':\n Mexh = 103.7 * 44E-3 + 196.2 * 18E-3 + 761.4 * 28E-3 + 200.5 * (CC_AIRRATIO - 1) * 32E-3 \\\n + 200.5 * (CC_AIRRATIO - 1) * 3.773 * 28E-3\n ncp_exh = 103.7 * 44 * 0.846 + 196.2 * 18 * 1.8723 + 761.4 * 28 * 1.039 \\\n + 200.5 * (CC_AIRRATIO - 1) * 32 * 0.918 + 200.5 * (CC_AIRRATIO - 1) * 3.773 * 28 * 1.039\n cp_exh = ncp_exh / Mexh # J/kgK\n else:\n Mexh = 98.5 * 44E-3 + 116 * 18E-3 + 436.8 * 28E-3 + 115.5 * (CC_AIRRATIO - 1) * 32E-3 \\\n + 115.5 * (CC_AIRRATIO - 1) * 3.773 * 28E-3\n ncp_exh = 98.5 * 44 * 0.846 + 116 * 18 * 1.8723 + 436.8 * 28 * 1.039 \\\n + 115.5 * (CC_AIRRATIO - 1) * 32 * 0.918 + 115.5 * (CC_AIRRATIO - 1) * 3.773 * 28 * 1.039\n cp_exh = ncp_exh / Mexh # J/kgK\n\n a = np.array([[1653E3 + HEAT_CAPACITY_OF_WATER_JPERKGK * (T_exhaust_GT_K - ST_DELTA_T - 534.5), \\\n HEAT_CAPACITY_OF_WATER_JPERKGK * (temp_i_K - 534.5)], \\\n [HEAT_CAPACITY_OF_WATER_JPERKGK * (534.5 - 431.8), \\\n 2085.8E3 + HEAT_CAPACITY_OF_WATER_JPERKGK * (534.5 - 431.8)]])\n b = np.array([m_exhaust_GT_kgpers * cp_exh * (T_exhaust_GT_K - (534.5 + ST_DELTA_T)), \\\n m_exhaust_GT_kgpers * cp_exh * (534.5 - 431.8)])\n [mdotHP_kgpers, mdotLP_kgpers] = np.linalg.solve(a, b)\n\n # calculate thermal output\n T_cond_0_K = T_sup_K + CC_DELTA_T_DH # condensation temperature constrained by the DH network temperature\n pres0 = (0.0261 * (T_cond_0_K - 273) ** 2 - 2.1394 * (T_cond_0_K - 273) + 52.893) * 1E3\n\n delta_h_evap_Jperkg = (-2.4967 * (T_cond_0_K - 273) + 2507) * 1E3\n q_output_ST_W = (mdotHP_kgpers + mdotLP_kgpers) * delta_h_evap_Jperkg # thermal output of ST\n\n # calculate electricity output\n h_HP_Jperkg = (2.5081 * (T_exhaust_GT_K - ST_DELTA_T - 273) + 2122.7) * 1E3 # J/kg\n h_LP_Jperkg = (2.3153 * (temp_i_K - 273) + 2314.7) * 1E3 # J/kg\n h_cond_Jperkg = (1.6979 * (T_cond_0_K - 273) + 2506.6) * 1E3 # J/kg\n\n el_produced_ST_W = mdotHP_kgpers * (h_HP_Jperkg - h_LP_Jperkg) + \\\n (mdotHP_kgpers + mdotLP_kgpers) * (h_LP_Jperkg - h_cond_Jperkg) # turbine electricity output\n\n el_input_compressor_W = SPEC_VOLUME_STEAM * (\n mdotLP_kgpers * (6E5 - pres0) + (mdotHP_kgpers + mdotLP_kgpers) * (48.2E5 - 6E5)) # compressor electricity use\n\n el_output_ST_W = ST_GEN_ETA * 
(el_produced_ST_W - el_input_compressor_W) # gross electricity production of turbine\n\n return q_output_ST_W, el_output_ST_W", "def k_Wa92(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.31 * U2) * (660 / Sc) ** 0.5\n\n return k", "def kl_sym(self, old_dist_info_vars, new_dist_info_vars):\n old_prob_var = old_dist_info_vars[\"prob\"]\n new_prob_var = new_dist_info_vars[\"prob\"]\n # Assume layout is N * T * A\n return tf.reduce_sum(\n old_prob_var * (tf.log(old_prob_var + TINY) - tf.log(new_prob_var + TINY)),\n axis=2\n )", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def __kappa_mle(self, k, R):\n return (iv(1, k) / iv(0, k)) - R", "def quasi_optimalityTV(f, lam_init = 2.0, q = 0.9):\n \n lam = lam_init\n max_iter = 50\n error = np.zeros(max_iter)\n #alt_error = np.zeros(max_iter)\n u_old = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n for i in range(1, max_iter):\n lam = lam_init * (q ** i)\n u_new = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n error[i] = np.linalg.norm(u_old - u_new)\n #alt_error[i] = np.linalg.norm(u_old - u_new) /abs(lam_init*(q ** i - q ** (i-1)))\n u_old = np.copy(u_new)\n\n #plt.plot(error)\n #plt.plot(alt_error)\n #plt.show()\n opt_idx = np.argmin(error[error != 0.0])\n t = 1.0 / (1.0 + lam_init * (q ** opt_idx))\n lam = lam_init * (q ** opt_idx)\n u= ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n \n return u, t", "def initial_estimator(f, x, step,k):\n\n fx = f(x)\n\n if decide(fx > 0):\n sign1 = 1\n else:\n sign1 = -1\n k_step = k\n h = fx / derivative(f, x)\n\n for k1 in range(1, 50000):\n step = step + 1\n x_new = x - k_step * h\n k_step = k_step * 2 # make the k double in each iteration\n fx_new = f(x_new)\n if decide(fx_new > 0):\n sign2 = 1\n else:\n sign2 = -1\n\n if not (sign1 == sign2):\n return x_new, step\n\n print(\"limit need to Increase\")", "def elo(old, exp, score, k=10):\n return old + k * (score - exp)", "def ekf_algorithm(self, u, y):\n x_est_before = self.x_estimated_state\n cov_matrix_before = self.cov_matrix\n # 1. Prediction\n # predict the state by integrate the time continuous system numerically\n sim_state_predict = self.heliSim.calc_step(u[0][0], u[1][0], self.no_disturbance_eval)\n x_est_predict = np.resize(sim_state_predict, (8, 1))\n # predict the new covariance by linearizing and discretizing the model\n Ak, Bk, Ck, Dk = self.get_linear_discrete_matrices(x_est_before, u[0][0], u[1][0])\n cov_matrix_predict = Ak @ cov_matrix_before @ np.transpose(Ak) + Bk @ self.N @ np.transpose(Bk)\n # 2. 
Update\n # compute kalman gain\n Kl = cov_matrix_predict @ np.transpose(Ck) @ np.linalg.inv(Ck @ cov_matrix_predict @ np.transpose(Ck) + self.W)\n # update state\n y_est = x_est_predict[0:3]\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ Ck) @ cov_matrix_predict\n\n # check if the update step state needs to be changed because of limit crossing\n # if that is the case, correct the state and set the state of the simulation accordingly\n corrected_state = self.heliSim.get_limited_state_and_change_state(x_est_update.reshape((1, 8))[0],\n ModelType.GYROMOMENT)\n x_est_update = np.resize(corrected_state, (8, 1))\n\n self.x_estimated_state = x_est_update\n self.cov_matrix = cov_matrix_update\n # print(\"------\")\n # print(cov_matrix_predict)\n return x_est_update", "def calc_q_gain(Tfl, Tabs, q_rad_Whperm2, DT, Tin, Tout, aperture_area_m2, c1, c2, Mfl, delts, Cp_waterglycol, C_eff, Te):\n\n xgain = 1\n xgainmax = 100\n exit = False\n while exit == False:\n qgain_Whperm2 = q_rad_Whperm2 - c1 * (DT[1]) - c2 * abs(DT[1]) * DT[1] # heat production from solar collector, eq.(5)\n\n if Mfl > 0:\n Tout = ((Mfl * Cp_waterglycol * Tin) / aperture_area_m2 - (C_eff * Tin) / (2 * delts) + qgain_Whperm2 + (\n C_eff * Tfl[1]) / delts) / (Mfl * Cp_waterglycol / aperture_area_m2 + C_eff / (2 * delts)) # eq.(6)\n Tfl[2] = (Tin + Tout) / 2\n DT[2] = Tfl[2] - Te\n qdiff = Mfl / aperture_area_m2 * Cp_waterglycol * 2 * (DT[2] - DT[1])\n else:\n Tout = Tfl[1] + (qgain_Whperm2 * delts) / C_eff # eq.(8)\n Tfl[2] = Tout\n DT[2] = Tfl[2] - Te\n qdiff = 5 * (DT[2] - DT[1])\n\n if abs(qdiff < 0.1):\n DT[1] = DT[2]\n exit = True\n else:\n if xgain > 40:\n DT[1] = (DT[1] + DT[2]) / 2\n if xgain == xgainmax:\n exit = True\n else:\n DT[1] = DT[2]\n xgain += 1\n\n # FIXME: redundant...\n # qout = Mfl * Cp_waterglycol * (Tout - Tin) / aperture_area\n # qmtherm = (Tfl[2] - Tfl[1]) * C_eff / delts\n # qbal = qgain - qout - qmtherm\n # if abs(qbal) > 1:\n # qbal = qbal\n return qgain_Whperm2", "def test_thermal_relaxation_error_kraus(self):\n t1, t2, time, p1 = (1, 2, 1, 0.3)\n error = thermal_relaxation_error(t1, t2, time, p1)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1)\n self.assertEqual(circ[0]['name'], 'kraus')\n self.assertEqual(circ[0]['qubits'], [0])", "def obrien_fleming_test(z, K, current_k, alpha):\n c = obrien_fleming_cutoff(K, current_k, alpha)\n return c < abs(z)", "def calculate_kl(self, old_policy, new_policy, obs):\n # _, logp0 = old_policy(obs, act)\n # logp0 = logp0.detach()\n # _, logp1 = new_policy(obs, act)\n\n p0 = old_policy._distribution(obs).probs.detach() + 1e-8\n p1 = new_policy._distribution(obs).probs\n # pi_new = Categorical(old_policy.logits_net(obs))\n # act_new = pi_new.sample()\n # logp0 = old_policy._log_prob_from_distribution(pi_old, act_old)\n # logp1 = new_policy._log_prob_from_distribution(pi_new, act_new)\n\n return torch.sum(p0 * torch.log(p0 / p1), 1).mean()\n # kl = torch.exp(logp1) * (logp1-logp0)\n # return kl.sum(-1, keepdim=True).mean()", "def __init__(self, kp, ki, kd):\n self.kp = kp\n self.ki = ki\n self.kd = kd\n self.error_last = 0\n self.error_sum = 0\n self.delta_error = 0", "def K(self):\n\n # Calculate and return the stiffness matrix in global coordinates\n return matmul(matmul(inv(self.T()), self.k()), self.T())", "def _K_computations(self, X, X2):\r\n if self.ARD:\r\n pass\r\n else:\r\n if X2 is None:\r\n 
self._K_inner_prod = np.dot(X,X.T)\r\n self._K_numer = self._K_inner_prod*self.weight_variance+self.bias_variance\r\n vec = np.diag(self._K_numer) + 1.\r\n self._K_denom = np.sqrt(np.outer(vec,vec))\r\n self._K_asin_arg = self._K_numer/self._K_denom\r\n self._K_dvar = four_over_tau*np.arcsin(self._K_asin_arg)\r\n else:\r\n self._K_inner_prod = np.dot(X,X2.T)\r\n self._K_numer = self._K_inner_prod*self.weight_variance + self.bias_variance\r\n vec1 = (X*X).sum(1)*self.weight_variance + self.bias_variance + 1.\r\n vec2 = (X2*X2).sum(1)*self.weight_variance + self.bias_variance + 1.\r\n self._K_denom = np.sqrt(np.outer(vec1,vec2))\r\n self._K_asin_arg = self._K_numer/self._K_denom\r\n self._K_dvar = four_over_tau*np.arcsin(self._K_asin_arg)", "def testEpsK1Changes(self):\n with self.test_context() as session:\n initial_eps = 1e-3\n num_classes = 5\n rm = gpflow.likelihoods.RobustMax(num_classes, initial_eps)\n\n expected_eps_k1 = initial_eps / (num_classes - 1.)\n actual_eps_k1 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k1, actual_eps_k1)\n\n new_eps = 0.412\n rm.epsilon.assign(new_eps, session=session)\n expected_eps_k2 = new_eps / (num_classes - 1.)\n actual_eps_k2 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k2, actual_eps_k2)", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def _K_computations(self, X, X2):\r\n # First extract times and indices.\r\n self._extract_t_indices(X, X2)\r\n\r\n self._K_compute_eq()\r\n self._K_compute_ode_eq()\r\n if X2 is None:\r\n self._K_eq_ode = self._K_ode_eq.T\r\n else:\r\n self._K_compute_ode_eq(transpose=True)\r\n self._K_compute_ode()\r\n\r\n if X2 is None:\r\n self._K_dvar = np.zeros((self._t.shape[0], self._t.shape[0]))\r\n else:\r\n self._K_dvar = np.zeros((self._t.shape[0], self._t2.shape[0]))\r\n\r\n # Reorder values of blocks for placing back into _K_dvar.\r\n self._K_dvar = np.vstack((np.hstack((self._K_eq, self._K_eq_ode)),\r\n np.hstack((self._K_ode_eq, self._K_ode))))\r\n self._K_dvar = self._K_dvar[self._rorder, :]\r\n self._K_dvar = self._K_dvar[:, self._rorder2]\r\n \r\n \r\n if X2 is None:\r\n # Matrix giving scales of each output\r\n self._scale = np.zeros((self._t.size, self._t.size))\r\n code=\"\"\"\r\n for(int i=0;i<N; i++){\r\n scale_mat[i+i*N] = B[index[i]+output_dim*(index[i])];\r\n for(int j=0; j<i; j++){\r\n scale_mat[j+i*N] = B[index[i]+output_dim*index[j]];\r\n scale_mat[i+j*N] = scale_mat[j+i*N];\r\n }\r\n }\r\n \"\"\"\r\n scale_mat, B, index = self._scale, self.B, self._index\r\n N, output_dim = self._t.size, self.output_dim\r\n weave.inline(code,['index',\r\n 'scale_mat', 'B',\r\n 'N', 'output_dim'])\r\n else:\r\n self._scale = np.zeros((self._t.size, self._t2.size))\r\n code = \"\"\"\r\n for(int i=0; i<N; i++){\r\n for(int j=0; j<N2; j++){\r\n scale_mat[i+j*N] = B[index[i]+output_dim*index2[j]];\r\n }\r\n }\r\n \"\"\"\r\n scale_mat, B, index, index2 = self._scale, self.B, self._index, self._index2\r\n N, N2, output_dim = self._t.size, self._t2.size, self.output_dim\r\n weave.inline(code, ['index', 'index2',\r\n 'scale_mat', 'B',\r\n 'N', 'N2', 'output_dim'])", "def P(self, energy, newEnergy, temperature):\n \"\"\" This is the decision-rule, adapted from Nascimento, et al., 2009 (See references) \"\"\"\n \n delta = self.calcDelta(newEnergy, energy)\n\n minTemp = 0.00001 # use minimum to avoid div/0 and buffer overflow\n if temperature == 
0:\n return minTemp\n elif temperature > minTemp:\n try:\n return math.exp(-1 * round(delta, 4) / round(temperature, 4))\n except OverflowError as detail:\n return minTemp\n else:\n return 1", "def dK_dtheta(self,dL_dK,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)), self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2, self.a[3]*self.basis_omega**3))\r\n Lo = np.column_stack((self.basis_omega, self.basis_omega, self.basis_omega, self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi, self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.]\r\n db_dlen = [0., 4*self.b[1]/self.lengthscale, 2*self.b[2]/self.lengthscale, 2*self.b[3]/self.lengthscale, 2*self.b[4]/self.lengthscale]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)), da_dlen[1]*self.basis_omega, da_dlen[2]*self.basis_omega**2, da_dlen[3]*self.basis_omega**3))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dlower_terms_dlen = db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F2lower,F2lower.T) + db_dlen[2]*np.dot(F1lower,F1lower.T) + db_dlen[3]*np.dot(F2lower,Flower.T) + db_dlen[4]*np.dot(Flower,F2lower.T)\r\n dG_dlen = 15*self.lengthscale**4/(400*np.sqrt(5))*Gint + 3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dlen + dlower_terms_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period, -self.a[3]*self.basis_omega**4/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2,self.basis_phi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = 
self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n #IPPprim2[0,0] = 2*(self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n #IPPint2[0,0] = (self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period, -3*self.a[3]*self.basis_omega**3/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF2lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**3/self.period,self.basis_omega,self.basis_phi+np.pi*3/2)(self.lower) + self._cos(-2*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n dlower_terms_dper = self.b[0] * (np.dot(dFlower_dper,Flower.T) + np.dot(Flower.T,dFlower_dper))\r\n dlower_terms_dper += self.b[1] * (np.dot(dF2lower_dper,F2lower.T) + np.dot(F2lower,dF2lower_dper.T)) - 4*self.b[1]/self.period*np.dot(F2lower,F2lower.T)\r\n dlower_terms_dper += self.b[2] * (np.dot(dF1lower_dper,F1lower.T) + np.dot(F1lower,dF1lower_dper.T)) - 2*self.b[2]/self.period*np.dot(F1lower,F1lower.T)\r\n dlower_terms_dper += self.b[3] * (np.dot(dF2lower_dper,Flower.T) + np.dot(F2lower,dFlower_dper.T)) - 2*self.b[3]/self.period*np.dot(F2lower,Flower.T)\r\n dlower_terms_dper += self.b[4] * (np.dot(dFlower_dper,F2lower.T) + np.dot(Flower,dF2lower_dper.T)) - 2*self.b[4]/self.period*np.dot(Flower,F2lower.T)\r\n\r\n dG_dper = 1./self.variance*(3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dper + 0.5*dlower_terms_dper)\r\n dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)\r\n\r\n # np.add(target[:,:,0],dK_dvar, target[:,:,0])\r\n target[0] += np.sum(dK_dvar*dL_dK)\r\n #np.add(target[:,:,1],dK_dlen, target[:,:,1])\r\n target[1] += np.sum(dK_dlen*dL_dK)\r\n #np.add(target[:,:,2],dK_dper, 
target[:,:,2])\r\n target[2] += np.sum(dK_dper*dL_dK)", "def E2K(E):\n return sqrt(E/2.0723)", "def jarrow_rudd(s, k, t, v, rf, cp, am=False, n=100):\n\n\t# Basic Calculations\n\th = t / n \n\tu = math.exp((rf - 0.5 * math.pow(v, 2)) * h + v * math.sqrt(h))\n\td = math.exp((rf - 0.5 * math.pow(v, 2)) * h - v * math.sqrt(h))\n\tdrift = math.exp(rf * h)\n\tq = (drift - d) / (u - d)\n\n\t#Process the terminal stock price\n\tstkval = np.zeros((n+1, n+1))\n\toptval = np.zeros((n+1, n+1))\n\tstkval[0, 0] = s\n\n\tfor i in range(1, n+1):\n\t\tstkval[i, 0] = stkval[i - 1, 0] * u\n\t\tfor j in range(1, i + 1):\n\t\t\tstkval[i, j] = stkval[i - 1, j - 1] * d\n\n\t# Backwards recursion for option price\n\tfor j in range(n + 1):\n\t\toptval[n, j] = max(0, cp * (stkval[n, j] - k))\n\tfor i in range(n - 1, -1, -1):\n\t\tfor j in range(i + 1):\n\t\t\toptval[i, j] = (q * optval[i + 1, j] + (1 - q) * optval[i + 1, j + 1]) / drift\n\t\t\tif am:\n\t\t\t\toptval[i, j] = max(optval[i, j], cp * (stkval[i, j] - k))\n\treturn optval[0, 0]", "def _find_k_offsets(self, k, d):\n olderr = sp.seterr(invalid= 'ignore') # turn off 'invalid multiplication' error;\n # it's just the 'inf' boundaries\n delta = k * d\n sp.seterr(**olderr) # turn the error back on\n return delta", "def ekf_algorithm(self, u, y):\n x_est_before = self.x_estimated_state\n cov_matrix_before = self.cov_matrix\n # 1. Prediction\n # predict the state by integrate the time continuous system numerically\n sim_state_predict = self.heliSim.calc_step(u[0][0], u[1][0], self.no_disturbance_eval)\n x_est_predict = np.resize(sim_state_predict, (6, 1))\n # predict the new covariance by linearizing and discretizing the model\n Ak, Bk, Ck, Dk = self.get_linear_discrete_matrices(x_est_before, u[0][0], u[1][0])\n cov_matrix_predict = Ak @ cov_matrix_before @ np.transpose(Ak) + Bk @ self.N @ np.transpose(Bk)\n # 2. 
Update\n # compute kalman gain\n Kl = cov_matrix_predict @ np.transpose(Ck) @ np.linalg.inv(Ck @ cov_matrix_predict @ np.transpose(Ck) + self.W)\n # update state\n y_est = x_est_predict[0:3, ]\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ Ck) @ cov_matrix_predict\n\n # check if the update step state needs to be changed because of limit crossing\n # if that is the case, correct the state and set the state of the simulation accordingly\n # heliSim_state = np.resize(x_est_update, (1, 6))[0]\n # heliSim_state = np.pad(heliSim_state, (0, 2), \"constant\")\n corrected_state = self.heliSim.get_limited_state_and_change_state(x_est_update.reshape((1, 6))[0],\n ModelType.EASY)\n x_est_update = np.resize(corrected_state, (6, 1))\n\n self.x_estimated_state = x_est_update\n self.cov_matrix = cov_matrix_update\n return x_est_update", "def err_k(self, X, Y):\n\t\tY_hat = self.predict(X)\n\t\treturn np.mean(Y_hat != from_1_of_k(Y))", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def _K_computations(self, X, X2=None):\r\n self._lengthscales=self.mapping.f(X)\r\n self._lengthscales2=np.square(self._lengthscales)\r\n if X2==None:\r\n self._lengthscales_two = self._lengthscales\r\n self._lengthscales_two2 = self._lengthscales2\r\n Xsquare = np.square(X).sum(1)\r\n self._K_dist2 = -2.*tdot(X) + Xsquare[:, None] + Xsquare[None, :]\r\n else:\r\n self._lengthscales_two = self.mapping.f(X2)\r\n self._lengthscales_two2 = np.square(self._lengthscales_two)\r\n self._K_dist2 = -2.*np.dot(X, X2.T) + np.square(X).sum(1)[:, None] + np.square(X2).sum(1)[None, :]\r\n self._w2 = self._lengthscales2 + self._lengthscales_two2.T\r\n prod_length = self._lengthscales*self._lengthscales_two.T\r\n self._K_exponential = np.exp(-self._K_dist2/self._w2)\r\n self._K_dvar = np.sign(prod_length)*(2*np.abs(prod_length)/self._w2)**(self.input_dim/2.)*np.exp(-self._K_dist2/self._w2)", "def find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n best_k = 0\n best_score = 0.0\n for k in range(min_k, max_k+1, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n preds = knn.predict(X_test)\n f1 = f1_score(y_test, preds)\n if f1 > best_score:\n best_k = k\n best_score = f1\n print(\"Best Value for k: {}\".format(best_k))\n print(\"F1-Score: {}\".format(best_score))", "def calc_K_tilda(kernel: Type[Kern], X_train: np.array, X_m: np.array):\n Knn = kernel.K(X_train, X_train)\n Knm = kernel.K(X_train, X_m)\n Kmn = kernel.K(X_m, X_train)\n Kmm = kernel.K(X_m, X_m)\n temp = np.dot(np.dot(Knm, np.linalg.inv(Kmm)), Kmn)\n K_tilda = np.subtract(Knn, temp)\n return K_tilda", "def updated_rating(old: int, expected: int, actual: int) -> int:\n return round(old + ELO_K_FACTOR * (actual - expected))", "def diff_k_p2(p1, p2, t=0.):\n return p2", "def k_Ni00(wind_ms, temp_C):\n\n U = wind_ms\n\n Sc = schmidt_number(temp_C)\n k = (0.333 * U + 0.222 * U ** 2) * (600 / Sc) ** 0.5\n\n return k", "def test_estimate_jackknife():\n X = 100\n T = 300.00\n delta_beta = 1.0\n jk_f = np.ones(X)\n jk_sym1 = np.ones(X)\n jk_sym2 = np.ones(X) + 1\n\n kBT = boltzman * pow(T, 2.)\n input_dict = {\"E\": -1.0, \"Cv\": 1.0 / kBT}\n\n ret = jk.estimate_jackknife(X, T, delta_beta, input_dict, jk_f, jk_sym1, jk_sym2)\n\n assert ret[\"E\"] == -1.0\n assert ret[\"E error\"] == 0.0\n assert 
math.isclose(ret[\"Cv\"], 1.0 / kBT)\n assert math.isclose(ret[\"Cv error\"], 2.7755575615628914e-16)\n return", "def ekf_algorithm(self, u, y):\n x_est_before = self.x_estimated_state\n cov_matrix_before = self.cov_matrix\n # 1. Prediction\n # predict the state by integrate the time continuous system numerically\n sim_state_predict = self.heliSim.calc_step(u[0][0], u[1][0], self.no_disturbance_eval)\n x_est_predict = np.resize(sim_state_predict, (8, 1))\n # predict the new covariance by linearizing and discretizing the model\n Ak, Bk, Ck, Dk = self.get_linear_discrete_matrices(x_est_before, u[0][0], u[1][0])\n cov_matrix_predict = Ak @ cov_matrix_before @ np.transpose(Ak) + Bk @ self.N @ np.transpose(Bk)\n # 2. Update\n # compute kalman gain\n Kl = cov_matrix_predict @ np.transpose(Ck) @ np.linalg.inv(Ck @ cov_matrix_predict @ np.transpose(Ck) + self.W)\n # update state\n y_est = np.concatenate((x_est_predict[0:3], x_est_predict[6:8]))\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ Ck) @ cov_matrix_predict\n\n corrected_state = self.heliSim.get_limited_state_and_change_state(x_est_update.reshape((1, 8))[0],\n ModelType.GYROMOMENT)\n # corrected_state = x_est_update\n x_est_update = np.resize(corrected_state, (8, 1))\n\n self.x_estimated_state = x_est_update\n self.cov_matrix = cov_matrix_update\n # print(\"------\")\n # print(cov_matrix_predict)\n return x_est_update", "def optimization_rel_disp_K(self, step_size, dof1, dof2, controlDOF):\n \"\"\"First, the initial variance of the system is computed for the matrices given by the user\"\"\"\n Variance = []\n \"\"\"The sign variable keeps track of the direction the algorithm is stepping\"\"\"\n sign = [0]\n Variance.append(self.VarianceOfResponse())\n\n \"\"\"The matrix K is incremented by one step with size = step_size. for the spring that connects dof1 and dof2\"\"\"\n sign.append(1)\n self.incrementK(step_size, dof1, dof2)\n Variance.append(self.VarianceOfResponse())\n\n \"\"\"Loop that iteratively steps down the optimal direction of increasing or reducing K\"\"\"\n i = 0\n while len(sign) < 4 or np.sum(sign[-4:]) != 0 or i < 100:\n i += 1\n print(abs(Variance[-2][int(controlDOF[0] - 1)] - Variance[-2][int(controlDOF[1] - 1)]))\n \"\"\"First condition demands a minimum of 4 steps. 
Second condition sums up the direction of the last 4 steps\n if the sum equals 0, the minimum value for the variance has been found.\"\"\"\n if abs(Variance[-2][int(controlDOF[0] - 1)] - Variance[-2][int(controlDOF[1] - 1)]) \\\n > abs(Variance[-1][int(controlDOF[0] - 1)] - Variance[-1][int(controlDOF[1] - 1)]):\n \"\"\"If the last variance is smaller than the variance before, keep going in that direction.\"\"\"\n self.incrementK(step_size * sign[-1], dof1, dof2)\n Variance.append(self.VarianceOfResponse())\n sign.append(sign[-1])\n elif abs(Variance[-2][int(controlDOF[0] - 1)] - Variance[-2][int(controlDOF[1] - 1)]) \\\n < abs(Variance[-1][int(controlDOF[0] - 1)] - Variance[-1][int(controlDOF[1] - 1)]):\n \"\"\"If the last variance is bigger than the one before, turn around and go the other way.\"\"\"\n self.incrementK(step_size * sign[-1] * -1, dof1, dof2)\n Variance.append(self.VarianceOfResponse())\n sign.append(sign[-1] * -1)\n return Variance", "def sensitivity(gas, surf, old_data, temp, dk, thermo=False):\n rxns = []\n sens1 = []\n sens2 = []\n sens3 = []\n sens4 = []\n sens5 = []\n sens6 = []\n sens7 = []\n sens8 = []\n sens9 = []\n sens10 = []\n sens11 = []\n sens12 = []\n\n gas_out_data, gas_names_data, dist_array_data, T_array_data = old_data\n\n reference = []\n for a in range(len(gas_names_data)):\n reference.append([gas_names_data[a], [gas_out_data[:, a]]])\n\n # getting the ratio\n for x in reference:\n if x[0] == 'CH4(2)':\n ch4_in = x[1][0][0]\n if x[0] == 'O2(3)':\n o2_in = x[1][0][0]\n if x[0] == 'Ar':\n ar_in = x[1][0][0]\n ratio = ch4_in / (2 * o2_in)\n moles_in = [ch4_in, o2_in, ar_in]\n\n for x in reference:\n if x[0] == 'CH4(2)':\n ch4_in = x[1][0][0]\n ch4_out = x[1][0][-1]\n if ch4_out < 0:\n ch4_out = 0.\n ch4_depletion = ch4_in - ch4_out\n if ch4_depletion <= 1.0e-8:\n ch4_depletion = 1.0e-8\n reference_ch4_conv = 1.0e-8\n else:\n reference_ch4_conv = ch4_depletion / ch4_in # Sensitivity definition 7: CH4 conversion\n if x[0] == 'Ar':\n ar = x[1][0][-1]\n if x[0] == 'O2(3)':\n o2_out = x[1][0][-1]\n if o2_out < 0:\n o2_out = 1.0e-15 # O2 can't be negative\n elif o2_out > o2_in:\n o2_out = o2_in # O2 can't be created, to make it equal to O2 in\n if x[0] == 'CO(7)':\n co_out = x[1][0][-1]\n if x[0] == 'H2(6)':\n h2_out = x[1][0][-1]\n if x[0] == 'H2O(5)':\n h2o_out = x[1][0][-1]\n if x[0] == 'CO2(4)':\n co2_out = x[1][0][-1]\n\n if reference_ch4_conv <= 1.0e-8:\n reference_h2_sel = 1.0e-8\n reference_co_sel = 1.0e-8\n reference_syngas_selectivity = 1.0e-8\n reference_syngas_yield = 1.0e-8\n reference_co_yield = 1.0e-8\n reference_h2_yield = 1.0e-8\n reference_full_oxidation_selectivity = 1.0e-8\n reference_full_oxidation_yield = 1.0e-8\n else:\n # negative sensitivity is higher selectivity\n reference_h2_sel = h2_out / (ch4_depletion * 2) # Sensitivity definition 5: H2 selectivity\n if reference_h2_sel <= 0:\n reference_h2_sel = 1.0e-15 # selectivity can't be 0\n\n reference_co_sel = co_out / ch4_depletion # Sensitivity definition 3: CO selectivity\n if reference_co_sel <= 0:\n reference_co_sel = 1.0e-15 # selectivity can't be 0\n\n reference_syngas_selectivity = reference_co_sel + reference_h2_sel # Sensitivity definition 1: SYNGAS selectivity\n\n reference_syngas_yield = reference_syngas_selectivity * reference_ch4_conv # Sensitivity definition 2: SYNGAS yield\n if reference_syngas_yield <= 0:\n reference_syngas_yield = 1.0e-15 # yield can't be 0\n\n reference_co_yield = co_out / ch4_in # Sensitivity definition 4: CO % yield\n # reference_co_yield = 
reference_co_sel * reference_ch4_conv\n\n reference_h2_yield = h2_out / (2 * ch4_in) # Sensitivity definition 6: H2 % yield\n # reference_h2_yield = reference_h2_sel * reference_ch4_conv\n\n # Sensitivity definition 8: H2O + CO2 selectivity\n reference_h2o_sel = h2o_out / (ch4_depletion * 2)\n reference_co2_sel = co2_out / ch4_depletion\n if reference_h2o_sel <= 0:\n reference_h2o_sel = 1.0e-15 # H2O selectivity can't be 0\n if reference_co2_sel <= 0:\n reference_co2_sel = 1.0e-15 # CO2 selectivity can't be 0\n reference_full_oxidation_selectivity = reference_h2o_sel + reference_co2_sel\n\n # Sensitivity definition 9: H2O + CO2 yield\n reference_full_oxidation_yield = reference_full_oxidation_selectivity * reference_ch4_conv\n\n # Sensitivity definition 10: exit temperature\n reference_exit_temp = T_array_data[-1]\n\n # Sensitivity definition 11: peak temperature\n reference_peak_temp = max(T_array_data)\n\n # Sensitivity definition 12: distance to peak temperautre\n reference_peak_temp_dist = dist_array_data[T_array_data.index(max(T_array_data))]\n\n # run the simulations\n if thermo is True:\n for m in range(surf.n_species):\n s = surf.species(m)\n original_coeffs = s.thermo.coeffs\n perturbed_coeffs = np.ones_like(original_coeffs)\n perturbed_coeffs[0] = original_coeffs[0]\n perturbed_coeffs[1:6] = original_coeffs[1:6]\n perturbed_coeffs[7:13] = original_coeffs[7:13]\n perturbed_coeffs[14] = original_coeffs[14]\n # perturbed_coeffs[6] = original_coeffs[6] + original_coeffs[6]*dk\n # perturbed_coeffs[13] = original_coeffs[13] + original_coeffs[13]*dk\n perturbed_coeffs[6] = original_coeffs[6] + dk\n perturbed_coeffs[13] = original_coeffs[13] + dk\n s.thermo = ct.NasaPoly2(100.000, 5000.000, ct.one_atm, perturbed_coeffs)\n surf.modify_species(m, s)\n c = monolithFull(gas, surf, temp, moles_in)\n\n gas_out, surf_out, gas_names, surf_names, dist_array, T_array = c\n\n new_amts = []\n for a in range(len(gas_names)):\n new_amts.append([gas_names[a], [gas_out[:, a]]])\n\n for x in new_amts:\n if x[0] == 'CH4(2)':\n new_ch4_in = x[1][0][0]\n new_ch4_out = x[1][0][-1]\n if new_ch4_out < 0:\n new_ch4_out = 0.\n new_ch4_depletion = new_ch4_in - new_ch4_out\n if new_ch4_depletion <= 1e-8:\n new_ch4_depletion = 1e-8\n new_ch4_conv = 1e-8\n else:\n new_ch4_conv = new_ch4_depletion / new_ch4_in # Sensitivity definition 7: CH4 conversion\n if x[0] == 'Ar':\n ar = x[1][0][-1]\n if x[0] == 'O2(3)':\n new_o2_in = x[1][0][0]\n new_o2_out = x[1][0][-1]\n if new_o2_out < 0:\n new_o2_out = 1.0e-15\n elif new_o2_out > new_o2_in:\n new_o2_out = new_o2_in\n if x[0] == 'CO(7)':\n new_co_out = x[1][0][-1]\n if x[0] == 'H2(6)':\n new_h2_out = x[1][0][-1]\n if x[0] == 'H2O(5)':\n new_h2o_out = x[1][0][-1]\n if x[0] == 'CO2(4)':\n new_co2_out = x[1][0][-1]\n\n if new_ch4_conv <= 1e-8:\n new_h2_sel = 1.0e-8\n new_co_sel = 1.0e-8\n new_syngas_selectivity = 1.0e-8\n new_syngas_yield = 1.0e-8\n new_co_yield = 1.0e-8\n new_h2_yield = 1.0e-8\n new_full_oxidation_selectivity = 1.0e-8\n new_full_oxidation_yield = 1.0e-8\n else:\n new_h2_sel = new_h2_out / (new_ch4_depletion * 2) # Sensitivity definition 5: H2 selectivity\n new_co_sel = new_co_out / new_ch4_depletion # Sensitivity definition 3: CO selectivity\n new_syngas_selectivity = new_co_sel + new_h2_sel # Sensitivity definition 1: SYNGAS selectivity\n new_syngas_yield = new_syngas_selectivity * new_ch4_conv # Sensitivity definition 2: SYNGAS yield\n new_co_yield = new_co_out / new_ch4_in # Sensitivity definition 4: CO % yield\n new_h2_yield = new_h2_out / (2 * 
new_ch4_in) # Sensitivity definition 6: H2 % yield\n new_h2o_sel = new_h2o_out / (new_ch4_depletion * 2) # Sensitivity definition 8: H2O + CO2 selectivity\n new_co2_sel = new_co2_out / new_ch4_depletion\n new_full_oxidation_selectivity = new_h2o_sel + new_co2_sel\n new_full_oxidation_yield = new_full_oxidation_selectivity * new_ch4_conv # Sensitivity definition 9: C2O + CO2 yield\n\n Sens5 = (new_h2_sel - reference_h2_sel) / (reference_h2_sel * dk)\n sens5.append(Sens5)\n\n Sens3 = (new_co_sel - reference_co_sel) / (reference_co_sel * dk)\n sens3.append(Sens3)\n\n Sens1 = (new_syngas_selectivity - reference_syngas_selectivity) / (reference_syngas_selectivity * dk)\n sens1.append(Sens1)\n\n Sens2 = (new_syngas_yield - reference_syngas_yield) / (reference_syngas_yield * dk)\n sens2.append(Sens2)\n\n Sens4 = (new_co_yield - reference_co_yield) / (reference_co_yield * dk)\n sens4.append(Sens4)\n\n Sens6 = (new_h2_yield - reference_h2_yield) / (reference_h2_yield * dk)\n sens6.append(Sens6)\n\n Sens7 = (new_ch4_conv - reference_ch4_conv) / (\n reference_ch4_conv * dk)\n sens7.append(Sens7)\n\n Sens8 = (new_full_oxidation_selectivity - reference_full_oxidation_selectivity) / (\n reference_full_oxidation_selectivity * dk)\n sens8.append(Sens8)\n\n Sens9 = (new_full_oxidation_yield - reference_full_oxidation_yield) / (reference_full_oxidation_yield * dk)\n sens9.append(Sens9)\n\n new_exit_temp = T_array[-1] # Sensitivity definition 10: exit temperature\n Sens10 = (new_exit_temp - reference_exit_temp) / (reference_exit_temp * dk)\n sens10.append(Sens10)\n\n new_peak_temp = max(T_array) # Sensitivity definition 11: peak temperature\n Sens11 = (new_peak_temp - reference_peak_temp) / (reference_peak_temp * dk)\n sens11.append(Sens11)\n\n new_peak_temp_dist = dist_array[\n T_array.index(max(T_array))] # Sensitivity definition 12: dist to peak temperature\n Sens12 = (new_peak_temp_dist - reference_peak_temp_dist) / (reference_peak_temp_dist * dk)\n sens12.append(Sens12)\n\n print \"%d %s %.3F %.3F\" % (m, surf.species_name(m), Sens1, Sens2)\n rxns.append(surf.species_name(m))\n\n # this step is essential, otherwise mechanism will have been altered\n s.thermo = ct.NasaPoly2(100.000, 5000.000, ct.one_atm, original_coeffs)\n surf.modify_species(m, s)\n else:\n for rxn in range(surf.n_reactions):\n c = monolithFull(gas, surf, temp, moles_in, sens=[dk, rxn])\n gas_out, surf_out, gas_names, surf_names, dist_array, T_array = c\n\n new_amts = []\n for a in range(len(gas_names)):\n new_amts.append([gas_names[a], [gas_out[:, a]]])\n\n for x in new_amts:\n if x[0] == 'CH4(2)':\n new_ch4_in = x[1][0][0]\n new_ch4_out = x[1][0][-1]\n if new_ch4_out < 0:\n new_ch4_out = 0.\n new_ch4_depletion = new_ch4_in - new_ch4_out\n if new_ch4_depletion <= 1e-8:\n new_ch4_depletion = 1e-8\n new_ch4_conv = 1e-8\n else:\n new_ch4_conv = new_ch4_depletion / new_ch4_in # Sensitivity definition 7: CH4 conversion\n if x[0] == 'Ar':\n ar = x[1][0][-1]\n if x[0] == 'O2(3)':\n new_o2_in = x[1][0][0]\n new_o2_out = x[1][0][-1]\n if new_o2_out < 0:\n new_o2_out = 1.0e-15\n elif new_o2_out > new_o2_in:\n new_o2_out = new_o2_in\n if x[0] == 'CO(7)':\n new_co_out = x[1][0][-1]\n if x[0] == 'H2(6)':\n new_h2_out = x[1][0][-1]\n if x[0] == 'H2O(5)':\n new_h2o_out = x[1][0][-1]\n if x[0] == 'CO2(4)':\n new_co2_out = x[1][0][-1]\n\n if new_ch4_conv <= 1e-8:\n new_h2_sel = 1.0e-8\n new_co_sel = 1.0e-8\n new_syngas_selectivity = 1.0e-8\n new_syngas_yield = 1.0e-8\n new_co_yield = 1.0e-8\n new_h2_yield = 1.0e-8\n new_full_oxidation_selectivity = 
1.0e-8\n new_full_oxidation_yield = 1.0e-8\n else:\n new_h2_sel = new_h2_out / (new_ch4_depletion * 2) # Sensitivity definition 5: H2 selectivity\n new_co_sel = new_co_out / new_ch4_depletion # Sensitivity definition 3: CO selectivity\n new_syngas_selectivity = new_co_sel + new_h2_sel # Sensitivity definition 1: SYNGAS selectivity\n new_syngas_yield = new_syngas_selectivity * new_ch4_conv # Sensitivity definition 2: SYNGAS yield\n new_co_yield = new_co_out / new_ch4_in # Sensitivity definition 4: CO % yield\n new_h2_yield = new_h2_out / (2 * new_ch4_in) # Sensitivity definition 6: H2 % yield\n new_h2o_sel = new_h2o_out / (new_ch4_depletion * 2) # Sensitivity definition 8: H2O + CO2 selectivity\n new_co2_sel = new_co2_out / new_ch4_depletion\n new_full_oxidation_selectivity = new_h2o_sel + new_co2_sel\n new_full_oxidation_yield = new_full_oxidation_selectivity * new_ch4_conv # Sensitivity definition 9: C2O + CO2 yield\n\n Sens5 = (new_h2_sel - reference_h2_sel) / (reference_h2_sel * dk)\n sens5.append(Sens5)\n\n Sens3 = (new_co_sel - reference_co_sel) / (reference_co_sel * dk)\n sens3.append(Sens3)\n\n Sens1 = (new_syngas_selectivity - reference_syngas_selectivity) / (reference_syngas_selectivity * dk)\n sens1.append(Sens1)\n\n Sens2 = (new_syngas_yield - reference_syngas_yield) / (reference_syngas_yield * dk)\n sens2.append(Sens2)\n\n Sens4 = (new_co_yield - reference_co_yield) / (reference_co_yield * dk)\n sens4.append(Sens4)\n\n Sens6 = (new_h2_yield - reference_h2_yield) / (reference_h2_yield * dk)\n sens6.append(Sens6)\n\n Sens7 = (new_ch4_conv - reference_ch4_conv) / (\n reference_ch4_conv * dk)\n sens7.append(Sens7)\n\n Sens8 = (new_full_oxidation_selectivity - reference_full_oxidation_selectivity) / (\n reference_full_oxidation_selectivity * dk)\n sens8.append(Sens8)\n\n Sens9 = (new_full_oxidation_yield - reference_full_oxidation_yield) / (reference_full_oxidation_yield * dk)\n sens9.append(Sens9)\n\n new_exit_temp = T_array[-1] # Sensitivity definition 10: exit temperature\n Sens10 = (new_exit_temp - reference_exit_temp) / (reference_exit_temp * dk)\n sens10.append(Sens10)\n\n new_peak_temp = max(T_array) # Sensitivity definition 11: peak temperature\n Sens11 = (new_peak_temp - reference_peak_temp) / (reference_peak_temp * dk)\n sens11.append(Sens11)\n\n new_peak_temp_dist = dist_array[T_array.index(max(T_array))] # Sensitivity definition 12: dist to peak temperature\n Sens12 = (new_peak_temp_dist - reference_peak_temp_dist) / (reference_peak_temp_dist * dk)\n sens12.append(Sens12)\n\n print \"%d %s %.3F %.3F\" % (rxn, surf.reaction_equations()[rxn], Sens1, Sens2)\n rxns.append(surf.reaction_equations()[rxn])\n\n return rxns, sens1, sens2, sens3, sens4, sens5, sens6, sens7, sens8, sens9, sens10, sens11, sens12", "def test_calc_kappa(test_coords):\n assert_almost_equal(calc_kappa(53.05187), 5762.687, decimal=3)", "def KS(self, using, dx=0.0001):\n pits = np.array(self.PIT(using=using,dx=dx))\n ks_result = skgof.ks_test(pits, stats.uniform())\n return ks_result.statistic, ks_result.pvalue", "def _Kgradients(self):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlp = self.noise_model.dlogpdf_df(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #Explicit\r\n #expl_a = np.dot(self.Ki_f, self.Ki_f.T)\r\n #expl_b = self.Wi_K_i\r\n #expl = 0.5*expl_a - 0.5*expl_b\r\n #dL_dthetaK_exp = dK_dthetaK(expl, X)\r\n\r\n #Implicit\r\n impl = mdot(dlp, dL_dfhat, I_KW_i)\r\n\r\n #No longer required as we are computing these in the gp already\r\n #otherwise we would take them 
away and add them back\r\n #dL_dthetaK_imp = dK_dthetaK(impl, X)\r\n #dL_dthetaK = dL_dthetaK_exp + dL_dthetaK_imp\r\n #dL_dK = expl + impl\r\n\r\n #No need to compute explicit as we are computing dZ_dK to account\r\n #for the difference between the K gradients of a normal GP,\r\n #and the K gradients including the implicit part\r\n dL_dK = impl\r\n return dL_dK", "def KroDelta(a,b):\n \n if (a==b):\n return 1\n else:\n return 0", "def kilometres_available(self):\n return self.fuel / self.litres_per_kilometre", "def rk4(self, t, h,G) :\r\n k1 = h*self.calc_diff_eqn(t, self.quant_vec,G,self.mass_vec)\r\n k2 = h*self.calc_diff_eqn(t + 0.5*h , self.quant_vec + 0.5*k1 ,G, self.mass_vec)\r\n k3 = h*self.calc_diff_eqn(t + 0.5*h , self.quant_vec + 0.5*k2 ,G, self.mass_vec)\r\n k4 = h*self.calc_diff_eqn(t + h , self.quant_vec + k3 ,G, self.mass_vec)\r\n y_new = self.quant_vec + ((k1 + 2*k2 + 2*k3 + k4)/6)\r\n return y_new", "def old():\n therm = [[300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.]]\n ts = np.linspace(0, 50, 1000)\n\n #odvod temperature bo vsota gradientov (diferencialov) z desne in z leve glede x\n #dT/dt[i] = K/x^2 * (temperature[i-1]- 2*temperature[i] + temperature[i+1])\n #razen ce je robna tocka\n #potem je treba nekaj scarat - robna bo funkcija\n def odvod(indeks, arr, K, time):\n odvodt = K * (arr[indeks-1][time] - 2*arr[indeks][time] + arr[indeks+1][time])\n return odvodt\n\n def robna(time):\n return 5*m.cos(0.05*time)\n\n\n K = 0.02\n x = 0.003\n\n def main_old():\n t = 0\n dt = 50. / 1000.\n for time in ts:\n for i in range(0,9):\n therm[i].append(therm[i][t] + (robna(time) if i==0 else odvod(i, therm, K, t)*dt/(x**2)))\n therm[9].append(300.)\n t+=1\n\n import matplotlib.pyplot as plt\n\n plt.plot(ts[:], therm[4][:-1], label = 'T(t)')\n plt.show()\n \n main_old()", "def kramers_kronig_hs(deltaE, I_EELS,\n N_ZLP=None,\n iterations=1,\n n=None,\n t=None,\n delta=0.5,\n full_output=True, prints = np.array([]), correct_S_s = False):\n output = {}\n # Constants and units\n me = 511.06\n\n e0 = 200 # keV\n beta =30 #mrad\n\n eaxis = deltaE[deltaE>0] #axis.axis.copy()\n ddeltaE = (np.max(deltaE) - np.min(deltaE))/(len(deltaE - 1))\n S_E = I_EELS[deltaE>0]\n y = I_EELS[deltaE>0]\n l = len(eaxis)\n i0 = N_ZLP\n \n # Kinetic definitions\n ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)\n\n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # We start by the \"angular corrections\"\n Im = y / (np.log(1 + (beta * tgt / eaxis) ** 2)) / ddeltaE#axis.scale\n if n is None and t is None:\n raise ValueError(\"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\")\n elif n is not None and t is not None:\n raise ValueError(\"Please provide the refractive index OR the \"\n \"thickness information, not both\")\n elif n is not None:\n # normalize using the refractive index.\n K = np.sum(Im/eaxis)*ddeltaE \n K = (K / (np.pi / 2) / (1 - 1. / n ** 2))\n te = (332.5 * K * ke / i0)\n if full_output is True:\n output['thickness'] = te\n elif t is not None:\n if N_ZLP is None:\n raise ValueError(\"The ZLP must be provided when the \"\n \"thickness is used for normalization.\")\n # normalize using the thickness\n K = t * i0 / (332.5 * ke)\n te = t\n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 
8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = next_fast_len(2*l) #2**math.floor(math.log2(l)+1)*4\n q = -2 * np.fft.fft(Im, esize).imag / esize\n\n q[:l] *= -1\n q = np.fft.fft(q)\n # Final touch, we have Re(1/eps)\n Re = q[:l].real + 1\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re ** 2 + Im ** 2)\n e2 = Im / (Re ** 2 + Im ** 2)\n\n if iterations > 0 and N_ZLP is not None:\n # Surface losses correction:\n # Calculates the surface ELF from a vaccumm border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im\n adep = (tgt / (eaxis + delta) *\n np.arctan(beta * tgt / eaxis) -\n beta / 1000. /\n (beta ** 2 + eaxis ** 2. / tgt ** 2))\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * ddeltaE #axis.scale\n if correct_S_s == True:\n print(\"correcting S_s\")\n Srfint[Srfint<0] = 0\n Srfint[Srfint>S_E] = S_E[Srfint>S_E]\n y = S_E - Srfint\n _logger.debug('Iteration number: %d / %d', io + 1, iterations)\n if iterations == io + 1 and full_output is True:\n output['S_s'] = Srfint\n del Srfint\n\n eps = (e1 + e2 * 1j)\n del y\n del I_EELS\n if 'thickness' in output:\n # As above,prevent errors if the signal is a single spectrum\n output['thickness'] = te\n if full_output is False:\n return eps\n else:\n return eps, output", "def solve_gamma(t, old, total):\n\n old, total = np.mean(old), np.mean(total)\n gamma = -1 / t * np.log(old / total)\n\n return gamma", "def rk_adaptive(accel,m,r,h,v,recur,emin=10**-12,emax=10**-8,hmax=.1,hmin=.01,recurmax=100):\n k1v = accel(m,r)\n k1r = v\n k2v = accel(m,r + 0.25*k1r*h)\n k2r = v + (0.25*k1v)*h\n k3v = accel(m,r + (3/32.*k1r + 9/32.*k2r)*h)\n k3r = v + (3/32.*k1v + 9/32.*k2v)*h\n k4v = accel(m,r + (1932/2197.*k1r - 7200/2197.*k2r + 7296/2197.*k3r)*h)\n k4r = v + (1932/2197.*k1v - 7200/2197.*k2v + 7296/2197.*k3v)*h\n k5v = accel(m,r + (439/216.*k1r - 8*k2r + 3680/513.*k3r - 845/4104.*k4r)*h)\n k5r = v + (439/216.*k1v - 8*k2v + 3680/513.*k3v - 845/4104.*k4v)*h\n k6v = accel(m,r - (8/27.*k1r + 2*k2r - 3544/2565.*k3r + 1859/4104.*k4r - 11/40.*k5r)*h)\n k6r = v - (8/27.*k1v + 2*k2v - 3544/2565.*k3v + 1859/4104.*k4v - 11/40.*k5v)*h\n\n # 4th order calculation\n new_v4 = v + h*(25/216.*k1v + 1408/2565.*k3v + 2197/4104.*k4v - 1/5.*k5v)\n new_r4 = r + h*(25/216.*k1r + 1408/2565.*k3r + 2197/4104.*k4r - 1/5.*k5r)\n \n # 5th order calculation\n new_v5 = v + h*(16/135.*k1v + 6656/12825.*k3v+28561/56430.*k4v - 9/50.*k5v + 2/55.*k6v) \n new_r5 = r + h*(16/135.*k1r + 6656/12825.*k3r+28561/56430.*k4r - 9/50.*k5r + 2/55.*k6r) \n\n # Calculate truncation error between 5th and 4th order\n eps = np.abs( (np.max(np.abs(new_r5)) - np.max(np.abs(new_r4))) / np.max(np.abs(new_r4)))\n \n # Compare eps to emin and emax and update h accordingly\n if np.max(eps) < emin:\n if h*2.0 < hmax:\n h *= 2.0\n new_v = new_v5\n new_r = new_r5 \n \n if np.max(eps) > emax:\n if h/2.0 > hmin:\n h /= 2.0\n print h\n # Error too large, 
call rk_adaptive again with smaller h\n if recur < recurmax:\n recur += 1\n rk_adaptive(accel,m,r,h,v,recur)\n new_v = new_v5\n new_r = new_r5\n \n else:\n new_v = new_v5\n new_r = new_r5\n \n return new_v, new_r, h", "def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. / 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)", "def dK_dtheta(self,dL_dK,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-1./self.lengthscale**2,0.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = 1./2*Gint + self.lengthscale/2*dGint_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n 
dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n #IPPprim2[0,0] = 2*(self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n #IPPint2[0,0] = (self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2))\r\n r2,omega2,phi2 = dLa_dper2.T,Lo[:,0:1],dLp_dper2.T\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale/2*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)))\r\n\r\n dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)\r\n\r\n target[0] += np.sum(dK_dvar*dL_dK)\r\n target[1] += np.sum(dK_dlen*dL_dK)\r\n target[2] += np.sum(dK_dper*dL_dK)", "def K(self, u, v):\n return (self.L(u, v) * self.N(u, v) - np.square(self.M(u, v))) / \\\n (self.E(u, v) * self.G(u, v) - np.square(self.F(u, v)))", "def price_heston_mc(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_):\r\n esp_ = monte_carlo(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_)\r\n return exp(-r_*T_)*esp_" ]
[ "0.6411429", "0.61664015", "0.60879153", "0.59830314", "0.58720684", "0.5814098", "0.58111423", "0.58111423", "0.577404", "0.5765074", "0.5746197", "0.57347727", "0.5694414", "0.5672527", "0.5637371", "0.5624423", "0.56225246", "0.5611478", "0.5611203", "0.5557416", "0.5555901", "0.5545839", "0.5539001", "0.5519944", "0.54987246", "0.5487196", "0.5485526", "0.5475468", "0.5465655", "0.54407054", "0.54297286", "0.5413739", "0.54039866", "0.5398821", "0.539731", "0.53924847", "0.5384838", "0.5383824", "0.5383052", "0.538219", "0.5371888", "0.53651905", "0.5364333", "0.535864", "0.5350602", "0.53501207", "0.534204", "0.5340611", "0.5335728", "0.53316003", "0.53298837", "0.5325229", "0.5321421", "0.5318152", "0.53168213", "0.5316668", "0.5314634", "0.531446", "0.53111595", "0.5307363", "0.53026426", "0.52908736", "0.5287677", "0.5287202", "0.5286938", "0.5280484", "0.52759445", "0.5272048", "0.5265578", "0.52492756", "0.52461296", "0.5237068", "0.52325386", "0.5231941", "0.5231628", "0.52310234", "0.5229983", "0.52298576", "0.5226044", "0.52251166", "0.5222672", "0.522242", "0.5222272", "0.5221614", "0.5221545", "0.52120227", "0.5209348", "0.5206215", "0.52036417", "0.5194325", "0.51910794", "0.5186964", "0.5170503", "0.5166939", "0.516453", "0.51619154", "0.5159256", "0.5158139", "0.5153991", "0.5150197", "0.51452804" ]
0.0
-1
Warn the user that running with nipy being imported locally is a bad idea.
def _test_local_install():\n if os.getcwd() == os.sep.join(\n os.path.abspath(__file__).split(os.sep)[:-2]):\n import warnings\n warnings.warn('Running the tests from the install directory may '\n 'trigger some failures')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_nuke():\n try:\n import _nuke\n return True\n except ImportError:\n return False", "def provoke_and_handle_ImportError():\n try:\n from you import me\n except ImportError as impe:\n print(f\"Sorry! {impe}\")", "def unavailable_importer(**kwargs):\n return LazyImportTester(\"_qiskit_this_module_does_not_exist_\", **kwargs)", "def _suppress_warnings():\n import warnings\n import sys\n import os\n if os.path.basename(sys.argv[0]) != \"trial\":\n warnings.simplefilter(\"ignore\")", "def disabled_test_no_warnings(import_path: str) -> None:\n imp_cmd = (\n sys.executable,\n '-W', 'error',\n '-c', f'import {import_path!s}',\n )\n\n subprocess.check_call(imp_cmd)", "def runipy_available():\n try:\n import runipy\n except ImportError:\n return False\n return True", "def warning(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def lint():\n toolkit.lint(exclude=[\"__init__.py\"])", "def no_xb_gui():\n logger.warning(\"Could not import the GUI.\")\n logger.warning(\"For instructions on how to install the GUI,\")\n logger.warning(\"check the docs janclemenslab.org/das/install.html.\")", "def import_fail_info(mod_name,fns=None):\n\n if fns == None:\n warn(\"Loading of %s failed.\\n\" % (mod_name,))\n else:\n warn(\"Loading of %s from %s failed.\\n\" % (fns,mod_name))", "def importlib_only(fxn):\n return unittest.skipIf(using___import__, \"importlib-specific test\")(fxn)", "def issilent():\n return GLOBAL['VERBOSE'] == False", "def skip_require():\n global ignore_once\n ignore_once = True", "def noCheck():\n dislin.nochek()", "def has_warnings(self) -> bool:", "def is_unexposed(self):\r\n return conf.lib.clang_isUnexposed(self)", "def test_if_ipython():\n try:\n return __IPYTHON__\n except NameError:\n return False", "def provoke_and_handle_ModuleNotFoundError():\n try:\n import arsiton324\n except ModuleNotFoundError as mnfe:\n print(f\"Sorry! 
{mnfe} was found!\")", "def ignore_builtin_verification():\n return not current_space().skip_builtin_verification", "def _should_ignore(self, name):\n _name = name.lower()\n return (_name.startswith(\"deprecated\") or\n _name.startswith(\"_\") or\n _name in (\"remote\", \"reserved\",\n \"dialogs_py\", \"dialogs_ipy\", \"dialogs_jy\"))", "def test_getattr_error_attr_not_found():\n with pytest.raises(ImportError):\n from astropy.cosmology.flrw import this_is_not_a_variable # noqa: F401", "def add_warning_non_file(self, code: Code, msg: str,\n is_persistant: bool = False) -> None:", "def warn():\n pass", "def import_packages_global():\n return \"\"", "def show_missing():\n if missing_modules:\n info(\"The following modules are currently not installed and would enable additional tasks:\")\n for pkg_name in missing_modules:\n info(' ' + pkg_name)", "def no_afni():\n if Info.version() is None:\n return True\n return False", "def test_import_allows_multiple_modules_failure(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\", \"_qiskit_module_does_not_exist_\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertFalse(feature)\n check.assert_called_once()", "def _check_imports():\n\n optlist = ['ALPSO', 'CONMIN', 'FSQP', 'IPOPT', 'NLPQLP',\n 'NSGA2', 'PSQP', 'SLSQP', 'SNOPT', 'NLPY_AUGLAG', 'NOMAD']\n\n for optimizer in optlist[:]:\n try:\n __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n except ImportError:\n optlist.remove(optimizer)\n\n return optlist", "def has_warnings_active(self) -> bool:", "def missing_in_gn(self):\n return self._missing_gn_flags", "def main():\n\n warning_message = \"This script contains the code that builds the \" \\\n + \"main page of the FCA (File Converter App).\" \\\n + \"\\n\\nThis script should NOT be run DIRECTLY.\" \\\n + \"\\n\\nPlease, import it in another script.\"\n\n cGUIf.show_warning(\"Import warning\",warning_message)", "def test_ensureWhenNotImported(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\", \"m3\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None, \"m3\": None})", "def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")", "def missing_in_gyp_by_file(self):\n return self._missing_gyp_files", "def _c_optimizations_ignored():\n pure_env = os.environ.get('PURE_PYTHON')\n return pure_env != \"0\" if pure_env is not None else PYPY", "def is_running_locally():\n local = False\n if \"POLYAXON_NO_OP\" in os.environ:\n local = True\n return local", "def is_not_used(self):\n pass", "def has_errors_fatal(self) -> bool:", "def is_ipython():\n return 'get_ipython' in globals()", "def _strict_warning(self):\n if self.options.get('strict', True):\n return ('Strict mode enabled (the default), so this could be due to an '\n 'integer key, such as an HTTP status code.')\n return ('Strict mode disabled. 
Prance cannot help you narrow this further '\n 'down, sorry.')", "def _run_from_ipython():\n try:\n __IPYTHON__\n return True\n except NameError:\n return False", "def disableIncorrectNameWarning(*args, **kwargs)->None:\n pass", "def check_invalid_args_general(config):\n # Not mathematically correct, but might be required if prior is not\n # appropriate.\n if hasattr(config, 'kl_scale') and config.kl_scale != 1.0:\n warnings.warn('Prior matching term will be scaled by %f.'\n % config.kl_scale)\n\n if hasattr(config, 'store_final_model') and \\\n hasattr(config, 'train_from_scratch') and \\\n config.store_final_model and config.train_from_scratch:\n warnings.warn('Note, when training from scratch, the final model is ' +\n 'only trained on the last task!')", "def has_nlu(self):\n return self.metadata.has_been_coded_for(\"nlu\")", "def isIgnoredPkg(self, *args):\n return _libsbml.SBMLDocument_isIgnoredPkg(self, *args)", "def test_ensureWhenNotImportedDontPrevent(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\")\n self.assertEqual(modules, {})", "def test_import_allows_attributes_failure(self):\n # We can just use existing modules for this.\n name_map = {\n \"sys\": (\"executable\", \"path\"),\n \"builtins\": (\"list\", \"_qiskit_dummy_attribute_\"),\n }\n\n feature = LazyImportTester(name_map)\n self.assertFalse(feature)", "def filter_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n warnings.simplefilter(\"ignore\", category=LightningDeprecationWarning)", "def test_suppress_import(self):\n logging.info(\"testing suppress import\")\n\n generated_file = os.path.join(self._test_workspace,\n \"generated.suppress\")\n\n extract_cmd = ['CodeChecker', 'parse',\n os.path.join(self._test_workspace, \"reports\"),\n \"--suppress\", generated_file,\n \"--export-source-suppress\"\n ]\n\n ret = call_cmd(extract_cmd,\n self._test_project_path,\n env.test_env(self._test_workspace))\n self.assertEqual(ret, 2, \"Failed to generate suppress file.\")\n\n codechecker_cfg = env.import_test_cfg(\n self._test_workspace)['codechecker_cfg']\n\n product_url = env.parts_to_url(codechecker_cfg)\n import_cmd = ['CodeChecker', 'cmd', 'suppress', '-i', generated_file,\n '--url', product_url, self._run_name]\n\n print(import_cmd)\n ret = call_cmd(import_cmd,\n self._test_project_path,\n env.test_env(self._test_workspace))\n self.assertEqual(ret, 0, \"Failed to import suppress file.\")", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def setup_test():\n if LooseVersion(np.__version__) >= LooseVersion('1.14'):\n np.set_printoptions(legacy='1.13')\n\n # Temporary fix until scipy release in October 2018\n # must be removed after that\n # print the first occurrence of matching warnings for each location\n # (module + line number) where the warning is issued\n if (\n LooseVersion(np.__version__) >= LooseVersion('1.15')\n and LooseVersion(scipy.version.short_version) <= '1.1.0'\n ):\n warnings.simplefilter('default')", "def test_ImportError(n=2):\n\n p = bad_import()\n\n try:\n p.result()\n except ImportError:\n print(\"Caught ImportError\")\n else:\n assert False, \"Raise the wrong Error\"", "def no_coverage_warn():\n if covdb is None or len(covdb.coverage_files) == 0:\n show_message_box(\"Need to Import Traces First\",\n \"Can't perform this action yet, no traces have been imported\",\n MessageBoxButtonSet.OKButtonSet,\n 
MessageBoxIcon.ErrorIcon)\n return True\n return False", "def were_in_ipython():\n shell = get_ipython()\n if shell is None:\n raise NotInIPythonError\n else:\n return True", "def test_absent_imports():\n module, HABEMUS_MODULE = optional_import(\"not_real_module\")\n\n assert not HABEMUS_MODULE\n assert module.__name__ == \"not_real_module\"\n with pytest.raises(ModuleNotFoundError):\n _ = module.layers", "def is_imported():\n return len(inspect.stack()) > 3", "def test_missing_pypirc(self):\n self.assertEqual(\n package_manager.extra_pypi_index_servers('does not exist'),\n [])", "def ignore_python_warnings(function):\n\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n \"\"\"\n Wrapped function.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n return function(*args, **kwargs)\n\n return wrapped", "def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))", "def test_ensureWhenFailedToImport(self):\n modules = {\"m2\": None}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None})", "def load_numpy_distutils_misc_util(finder, module):\n module.IgnoreName(\"numscons\")", "def check_imports():\n try:\n import dns # pylint: disable=C0415,W0611 # noqa: F401\n import ecdsa # pylint: disable=C0415,W0611 # noqa: F401\n import google.protobuf # pylint: disable=C0415,W0611 # noqa: F401\n import jsonrpclib # pylint: disable=C0415,W0611 # noqa: F401\n import pyaes # pylint: disable=C0415,W0611 # noqa: F401\n import qrcode # pylint: disable=C0415,W0611 # noqa: F401\n import requests # pylint: disable=C0415 # noqa: F401\n except ImportError as i_e:\n sys.exit(\"Error: %s. 
Try 'sudo pip install <module-name>'\" % str(i_e))\n from google.protobuf import descriptor # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import message # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import reflection # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import ( # pylint: disable=C0415,W0611 # noqa: F401\n descriptor_pb2,\n )\n from jsonrpclib import ( # pylint: disable=C0415,W0611 # noqa: F401\n SimpleJSONRPCServer,\n )\n\n # make sure that certificates are here\n certs = requests.utils.DEFAULT_CA_BUNDLE_PATH\n if not os.path.exists(certs):\n raise AssertionError(\"Certificates not found\")", "def test_LPyModelDriver_nolpy(): # pragma: no lpy\n assert_raises(RuntimeError, LPyModelDriver.LPyModelDriver,\n 'test', scripts['lpy'])", "def missing_in_gyp(self):\n return self._missing_gyp_flags", "def __init__(self):\n super().__init__(\n \"This module requires installation of megnet, \"\n \"which is not found in your current environment.\"\n \"Please install it via `pip install megnet` or \"\n \"via github source\"\n )", "def _should_ignore_module(cls, module_name):\n # exclude test modules for now to avoid spurious failures\n # TODO(jelle): enable for test modules too\n return module_name.split(\".\")[-1].startswith(\"test\")", "def modifyComponentsNotPreferableOnServer(self):\n # Nothing to do\n pass", "def ignore_warnings(my_func):\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n This is where the warning suppression occurs.\n \"\"\"\n if sys.version_info >= (3, 2):\n warnings.simplefilter(\"ignore\", ResourceWarning)\n with warnings.catch_warnings():\n my_func(self, *args, **kwargs)\n\n return wrapper", "def missing_ltihooks(finder, caller):\n caller.IgnoreName(\"ltihooks\")", "def no_prereq_install():\n return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False'))", "def init_warnings():\n warnings.simplefilter(\"ignore\", category=AstropyWarning)", "def discardedException( self, exc_stuff ):\n extype = exc_stuff[0]\n value = exc_stuff[1]\n tb = exc_stuff[2]\n if isJython :\n # Misc intermediate ImportError exceptions may occur\n # during Jython import we need to ignore them ; if a module can't be\n # really imported a final ImportError will break the execution of the debuggee\n # so it's safe to ignore ImportError for Jythn here\n if issubclass(extype,ImportError) :\n return True # discarded\n return False", "def test_import_error_message_maintained(self):\n settings = GrpcSettings({\n 'INTERCEPTORS': [\n ('tests.invalid_module.InvalidClassName', {})\n ]\n })\n with self.assertRaises(ImportError):\n _ = settings.INTERCEPTORS", "def run_from_ipython():\n try:\n __IPYTHON__\n return True\n except NameError:\n return False", "def error_impresion(self):\n self._info(\"error_impresion\")", "def requirement_missing(script):\n if \"requires\" in script:\n if script[\"requires\"] is None:\n return False\n for package in script[\"requires\"].split():\n try:\n pkg_resources.working_set.require(package)\n except Exception:\n return True\n return False", "def allow_warnings(self):\n return self._allow_warnings", "def not_installed(self) -> bool:\n return pulumi.get(self, \"not_installed\")", "def _validate_running_as_fvalidation_exempt_user():\n if acm.UserName() not in FValidation_settings.SUPERUSERS:\n # Ensure that tool is run as a user exempt from FValidation\n # in order to avoid GUI pop-ups when touching entities.\n raise ValueError(\"This tool must be run by a user that is exempt from FValidation.\")", "def 
test_deprecated_private_variables(attr):\n with pytest.warns(AstropyDeprecationWarning):\n resolve_name(\"astropy\", \"cosmology\", \"flrw\", attr)", "def add_ignore_module(modules: List[Any]):\n global BUILTIN_LIKELY_MODULES\n for module in modules:\n if module not in BUILTIN_LIKELY_MODULES:\n BUILTIN_LIKELY_MODULES.append(module)", "def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)", "def warning(self, *args, **kwargs):", "def check_stability(self):", "def add_warning_non_file(self, code: Code, msg: str,\n is_persistant: bool = False) -> None:\n self.add_error_non_file(code, msg, severity=Severity.WARNING,\n is_persistant=is_persistant)", "def no_require():\n from twill import commands\n \n l = commands.browser._post_load_hooks\n l = [ fn for fn in l if fn != _require_post_load_hook ]\n commands.browser._post_load_hooks = l\n\n global _requirements\n _requirements = []", "def is_notebook():\n return \"ipykernel\" in sys.modules", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def _set_insufficient_confidence_warning(\n self): # pragma: no cover\n self.failed_initial_confidence = True\n self.surface_result('LO_INIT_CONF')\n self.warnings.append(\n 'Bisect failed to reproduce the regression with enough confidence.')", "def test_check_model_dependencies_missing_warning(dependency_testing_model, complete_env) -> None:\n incomplete_env = Python(\n [r for r in complete_env.requirements if r not in ['PyYAML==0.0.1', 'pandas==0.0.1']]\n ) # drop a single dependency to be caught\n with warnings.catch_warnings(record=True) as caught_warnings:\n assert not check_model_dependencies(\n model_cls=dependency_testing_model,\n environment=incomplete_env,\n raise_for_missing=False,\n )\n warn_msg = caught_warnings[0].message.args[0]\n assert warn_msg == \"the following packages are required by the model but missing \" \\\n \"from the environment:\\npandas (installed via ['pandas'])\" \\\n \"\\nyaml (installed via ['PyYAML'])\"", "def has_err_warn(self):\r\n return self._arm.has_err_warn", "def test_import_nothandled():\n sys.meta_path.append(TaskImporter())\n with pytest.raises(ImportError):\n import_module('not.exist')", "def unsure_how_to_install(self):\n msg = \"Unsure how to install {0}.\".format(self.name)\n if self.info_uri:\n msg += \"\\nRefer to {0} for information\".format(self.info_uri)\n\n if platform.system() == 'Darwin':\n if 'brew' in self._provider_package and not helpers['brew']:\n msg += (\"\\nCOT can use Homebrew (https://brew.sh), \"\n \"if available on your system, to install {0}.\"\n .format(self.name))\n if 'port' in self._provider_package and not helpers['port']:\n msg += (\"\\nCOT can use MacPorts (https://www.macports.org/), \"\n \"if available on your system, to install {0}.\"\n .format(self.name))\n if ('brew' in self._provider_package or\n 'port' in self._provider_package):\n return RuntimeError(msg)\n else:\n return NotImplementedError(msg)\n elif platform.system() == 'Linux' and (\n ('apt-get' in self._provider_package or\n 'yum' in self._provider_package) and\n not (helpers['apt-get'] or helpers['yum'])):\n msg += (\"\\nCOT can use package managers 'yum' or 'apt-get' to\"\n \" install helpers on your system, but it appears that\"\n \" you have neither of these package managers?\")\n return RuntimeError(msg)\n else:\n return NotImplementedError(msg)", "def _warn(msg):\n 
warnings.warn(msg, TessyWarning, stacklevel=3)", "def check_no_silent_crash(self, override=False):\n if self.results:\n score = self.results.linter.stats.get('global_note', False)\n if score is False:\n messages = self.results.linter.stats.get('by_msg', {})\n if messages.get('syntax-error', False) and not override:\n self.logging.warning('\\n------------------------------------------------------------------')\n self.logging.warning('PYLINT FAILED BECAUSE SYNTAX ERROR.')\n self.logging.warning('------------------------------------------------------------------')\n self.logging.warning('\\n')\n self.failed_files.append(self.fname)\n return False\n self.logging.info('\\n------------------------------------------------------------------')\n self.logging.info('FILE WAS IGNORED.')\n self.logging.info('------------------------------------------------------------------')\n return True\n return False", "def test_noexceptions(nb_path, tmpdir):\n nb = load_notebook(nb_path)\n pyfile = \"%s.py\" % str(\n tmpdir.join(os.path.splitext(os.path.basename(nb_path))[0]))\n export_py(nb, pyfile)\n execfile(pyfile, {})", "def test_deprecated_modules(self):\n\n deprecated_modules_present = False\n\n deprecated_modules = [\n \"game_assets\",\n \"models\",\n \"world\",\n \"modular_assets\",\n ]\n\n for path in self.application_files:\n for module in deprecated_modules:\n module_text = open(path).read()\n found_reference = False\n if \"import %s\" % module in module_text:\n found_reference = True\n if \"from %s\" % module in module_text:\n found_reference = True\n\n if found_reference:\n print(\"Found '%s' reference in %s\" % (module, path))\n deprecated_modules_present = True\n\n self.assertFalse(deprecated_modules_present)", "def has_off_hook_warning(self) -> bool:", "def CheckNoBannedFunctions(input_api, output_api):\n warnings = []\n\n def GetMessageForFunction(input_api, affected_file, line_num, line, func_name,\n message):\n result = []\n if input_api.re.search(r\"^ *//\", line): # Ignore comments.\n return result\n if line.endswith(\" nocheck\"): # Ignore lines with nocheck comments.\n return result\n\n if func_name in line:\n result.append(' %s:%d:' % (affected_file.LocalPath(), line_num))\n for message_line in message:\n result.append(' %s' % message_line)\n\n return result\n\n\n file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))\n for f in input_api.AffectedFiles(file_filter=file_filter):\n for line_num, line in f.ChangedContents():\n for func_name, message in _BANNED_CPP_FUNCTIONS:\n problems = GetMessageForFunction(input_api, f, line_num, line,\n func_name, message)\n if problems:\n warnings.extend(problems)\n\n result = []\n if (warnings):\n result.append(output_api.PresubmitPromptWarning(\n 'Banned functions were used.\\n' + '\\n'.join(warnings)))\n return result", "def check_for_underscore(self):\n # If something injected a '_' variable in __builtin__, delete\n # ipython's automatic one so we don't clobber that. gettext() in\n # particular uses _, so we need to stay away from it.\n if '_' in __builtin__.__dict__:\n try:\n del self.shell.user_ns['_']\n except KeyError:\n pass" ]
[ "0.59475875", "0.5733747", "0.5700879", "0.56831914", "0.56009203", "0.5547987", "0.54913825", "0.5431076", "0.5426784", "0.53830874", "0.53597766", "0.53400105", "0.53313553", "0.53298444", "0.52693295", "0.5264683", "0.5247983", "0.5241664", "0.5224841", "0.5179244", "0.5177511", "0.5171002", "0.51662105", "0.5145082", "0.5107308", "0.5099041", "0.5096394", "0.5096328", "0.5078653", "0.5074061", "0.5054752", "0.5033172", "0.5029847", "0.5026492", "0.50252444", "0.5017961", "0.50152206", "0.5011554", "0.50023377", "0.5001105", "0.49909812", "0.49841407", "0.49789426", "0.49720275", "0.49714416", "0.49711275", "0.49626094", "0.4947703", "0.4932984", "0.49321315", "0.49293733", "0.4918651", "0.49062544", "0.489819", "0.4896423", "0.48954844", "0.48877853", "0.48831964", "0.4881329", "0.48712847", "0.48639172", "0.4861478", "0.48554665", "0.48434925", "0.48408797", "0.48392335", "0.48379812", "0.48349783", "0.48201376", "0.48159823", "0.48109084", "0.4810497", "0.48070475", "0.4804283", "0.47945318", "0.4786666", "0.4775008", "0.47696596", "0.4766923", "0.47640958", "0.476246", "0.47576", "0.47484392", "0.47471544", "0.4732011", "0.47281012", "0.472802", "0.47191772", "0.47189903", "0.4717881", "0.47144753", "0.4711507", "0.471103", "0.4704807", "0.47038782", "0.4699514", "0.46963176", "0.46950102", "0.46922806", "0.46902224" ]
0.5077109
29
Tire l'objectif d'une partie de chiffre.
def tire_objectif() -> int:\n return random.randint(min_objectif, max_objectif)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object(self):", "def test_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion)\n verif_bateau(joueur1, joueur2.porte_avion)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion)\n verif_bateau(joueur2, joueur1.porte_avion)", "def petite_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)", "def mezclar_bolsa(self):", "def grande_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)", "def objects(self):", "def copier(partie):\n # Instanciation d'une nouvelle partie\n partie_copiee = Partie()\n\n for tour in partie.tours:\n tour_tmp = Tour(tour.x, tour.y) # Instanciation d'un tour\n partie_copiee.nouveau_tour(tour_tmp) # Ajout du tour\n\n return partie_copiee", "def __init__(self, partie):\n self.ID = 0\n self.nbr_noeuds = 1\n self.nbr_feuilles = 1\n self.partie = partie\n self.nbr_fils = 0\n self.fils = []", "def sketch_und_part(self):\n if (self.dimension == '3D'):\n #Sketch Wuerfel zeichnen\n self.sketch_Wuerfel = self.model.ConstrainedSketch(\n name='Seitenansicht_Wuerfel',\n sheetSize=200.0)\n self.sketch_Wuerfel.rectangle(\n point1=(-self.laenge_x/2.0, -self.laenge_y/2.0),\n point2=(self.laenge_x/2.0, self.laenge_y/2.0))\n #Part Wuerfel generieren\n self.part_Wuerfel = self.model.Part(\n name=self.name+'_Wuerfel',\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY)\n self.part_Wuerfel.BaseSolidExtrude(\n sketch=self.sketch_Wuerfel,\n depth=self.laenge_z/2.0) #z-Symmetrie\n #Sketch Pore zeichnen (fuer Quader und Zylinder)\n self.sketch_Pore = self.model.ConstrainedSketch(\n name='Seitenansicht_Pore',\n sheetSize=200.0)\n if (self.typ_Pore == 'Quader'):\n self.sketch_Pore.rectangle(\n point1=(-self.porenparameter_x/2.0, -self.porenparameter_y/2.0),\n point2=(self.porenparameter_x/2.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Zylinder'):\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 
0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Ellipsoid' ):\n matlab.ellipsoidIgesOut(\n self.porenparameter_x,\n self.porenparameter_y,\n self.porenparameter_z,\n 'Ellipsoid')\n # if (self.porenparameter_x == self.porenparameter_z):\n # self.sketch_Pore.ConstructionLine(\n # point1=(0.0, -100.0),\n # point2=(0.0, 100.0))\n # self.sketch_Pore.EllipseByCenterPerimeter(\n # center=(0.0, 0.0),\n # axisPoint1=(self.porenparameter_x/2.0, 0.0),\n # axisPoint2=(0.0, self.porenparameter_y/2.0))\n # self.sketch_Pore.autoTrimCurve(\n # curve1=self.sketch_Pore.geometry[3],\n # point1=(-self.porenparameter_x/2.0, 0.0))\n # self.sketch_Pore.Line(\n # point1=(0.0, self.porenparameter_y/2.0),\n # point2=(0.0, -self.porenparameter_y/2.0))\n else:\n print('typ_Pore Error!')\n #Part Pore generieren\n if (self.typ_Pore == 'Ellipsoid' ):\n # if (self.porenparameter_x == self.porenparameter_z):\n # self.part_Pore.BaseSolidRevolve(\n # sketch=self.sketch_Pore,\n # angle=360.0,\n # flipRevolveDirection=OFF)\n self.iges_Datei = mdb.openIges(\n 'Ellipsoid.igs',\n msbo=False,\n trimCurve=DEFAULT,\n scaleFromFile=OFF)\n self.model.PartFromGeometryFile(\n name=self.name+'_Pore',\n geometryFile=self.iges_Datei,\n combine=False,\n stitchTolerance=1.0,\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY,\n convertToAnalytical=1,\n stitchEdges=1,\n scale=1) # Skalierung\n self.part_Pore = self.model.parts[self.name+'_Pore']\n self.part_Pore.AddCells(\n faceList = self.part_Pore.faces,\n flipped=False)\n del self.iges_Datei\n os.remove('abaqus_read_iges0.log') #Arbeitsordner aufraeumen\n os.remove('temp-Ellipsoid-new.sat')\n os.remove('Ellipsoid.igs')\n elif (self.typ_Pore == 'Quader' or 'Zylinder'):\n self.part_Pore = self.model.Part(\n name=self.name+'_Pore',\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY)\n self.part_Pore.BaseSolidExtrude(\n sketch=self.sketch_Pore,\n depth=self.porenparameter_z)\n #Assemble\n self.assembly = self.model.rootAssembly\n self.assembly.DatumCsysByDefault(CARTESIAN)\n self.assembly.Instance(\n name=self.name+'_Pore',\n part=self.part_Pore,\n dependent=ON)\n self.assembly.Instance(\n name=self.name+'_Wuerfel',\n part=self.part_Wuerfel,\n dependent=ON)\n #Translation\n self.assembly.translate(\n instanceList=(self.name+'_Wuerfel', ),\n vector=(0.0, 0.0, -self.laenge_z/2.0))\n if (self.typ_Pore == 'Ellipsoid'):\n self.assembly.translate(\n instanceList=(self.name+'_Pore', ),\n vector=(0.0, 0.0, 0.0))\n elif (self.typ_Pore == 'Quader' or 'Zylinder'):\n self.assembly.translate(\n instanceList=(self.name+'_Pore', ),\n vector=(0.0, 0.0, -self.porenparameter_z/2.0))\n #Rotation\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(1.0, 0.0, 0.0),\n angle=self.porenparameter_rx)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(0.0, 1.0, 0.0),\n angle=self.porenparameter_ry)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(0.0, 0.0,1.0),\n angle=self.porenparameter_rz)\n #Schneiden\n self.assembly.InstanceFromBooleanCut(\n name='RVE',\n instanceToBeCut=self.assembly.instances[self.name+'_Wuerfel'],\n cuttingInstances=(self.assembly.instances[self.name+'_Pore'], ),\n originalInstances=SUPPRESS)\n self.assembly.deleteFeatures((self.name+'_Wuerfel', self.name+'_Pore', ))\n # del self.model.parts[self.name+'_Wuerfel']\n # del 
self.model.parts[self.name+'_Pore']\n self.part_RVE = self.model.parts[self.name]\n elif (self.dimension == '2D'):\n #Sketch Wuerfel zeichnen\n self.sketch_Wuerfel = self.model.ConstrainedSketch(\n name='Seitenansicht_Wuerfel',\n sheetSize=200.0)\n self.sketch_Wuerfel.rectangle(\n point1=(0.0, 0.0),\n point2=(self.laenge_x/2.0, self.laenge_y/2.0)) #x- und y-Symmetrie\n #Part Wuerfel generieren\n self.part_Wuerfel = self.model.Part(\n name=self.name+'_Wuerfel',\n dimensionality=TWO_D_PLANAR,\n type=DEFORMABLE_BODY)\n self.part_Wuerfel.BaseShell(sketch=self.sketch_Wuerfel)\n #Sketch Pore zeichnen\n self.sketch_Pore = self.model.ConstrainedSketch(\n name='Seitenansicht_Pore',\n sheetSize=200.0)\n if (self.typ_Pore == 'Ellipsoid'):\n self.sketch_Pore.ConstructionLine(\n point1=(0.0, -100.0),\n point2=(0.0, 100.0))\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n self.sketch_Pore.autoTrimCurve(\n curve1=self.sketch_Pore.geometry[3],\n point1=(-self.porenparameter_x/2.0, 0.0))\n self.sketch_Pore.Line(\n point1=(0.0, self.porenparameter_y/2.0),\n point2=(0.0, -self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Quader'):\n self.sketch_Pore.rectangle(\n point1=(-self.porenparameter_x/2.0, -self.porenparameter_y/2.0),\n point2=(self.porenparameter_x/2.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Zylinder'):\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n else:\n print('typ_Pore Error!')\n #Part Pore generieren\n self.part_Pore = self.model.Part(\n name=self.name+'_Pore',\n dimensionality=TWO_D_PLANAR,\n type=DEFORMABLE_BODY)\n self.part_Pore.BaseShell(sketch=self.sketch_Pore)\n #Assemble\n self.assembly = self.model.rootAssembly\n self.assembly.DatumCsysByDefault(CARTESIAN)\n self.assembly.Instance(\n name=self.name+'_Wuerfel',\n part=self.part_Wuerfel,\n dependent=ON)\n self.assembly.Instance(\n name=self.name+'_Pore',\n part=self.part_Pore,\n dependent=ON)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, self.laenge_z/2.0),\n axisDirection=(0.0, 0.0, self.laenge_z/2.0+1),\n angle=self.porenparameter_rz)\n self.assembly.InstanceFromBooleanCut(\n name='RVE',\n instanceToBeCut=self.assembly.instances[self.name+'_Wuerfel'],\n cuttingInstances=(self.assembly.instances[self.name+'_Pore'], ),\n originalInstances=SUPPRESS)\n self.assembly.deleteFeatures((self.name+'_Wuerfel', self.name+'_Pore', ))\n del self.model.parts[self.name+'_Wuerfel']\n #del self.model.parts[self.name+'_Pore']\n self.part_RVE = self.model.parts[self.name]\n else:\n print('dimension Error!')", "def get_contenu(self):\n return self.objets", "def objectFields(self):\n raise NotImplementedError", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = membre.tenu\n membre.tenu = None", "def test_get_part(self):\n pass", "def algoritmo(posicionInicial):\n\tglobal vertices, lados\n\n\t# Inicio el algoritmo QuickHull y almacenare el resultado en vertices\n\tconvex = quickHull.QuickHull(posicionInicial)\n\tvertices = convex.quickHull(verticesObjeto)\n\tprint(\"\\nVertices en la envolvente convexa:\\n\")\n\tfor vertex in (vertices):\n\t\tprint(vertex)\n\n\t# creo la lista de lados a partir del los vertices de la envolvente\n\tlados = creaLados(vertices)\n\n\t# creo la maya y el objeto\n\tmi_mesh = bpy.data.meshes.new(nombre)\n\tmi_objeto 
= bpy.data.objects.new(nombre, mi_mesh)\n\n\t# coloco el objeto en la misma posicion en la que estaba el objeto\n\t# anteriormente seleccionado.\n\tmi_objeto.location = posicionInicial\n\n\t# enlazo el objeto a la escena\n\tbpy.context.scene.objects.link(mi_objeto)\n\n\t# creo el la maya del objeto\n\tmi_mesh.from_pydata(vertices,lados,caras)\n\tmi_mesh.update(calc_edges=True)", "def NuevaPartida(self,):\n\t\"\"\" Numeros Disponibles \"\"\"\n\tDisponibles[0] = True\n\tDisponibles[1] = True\n\tDisponibles[2] = True\n\tDisponibles[3] = True\n\tDisponibles[4] = True\n\tDisponibles[5] = True\n\t\"\"\" Jugador Uno \"\"\"\n\tJ1[0] = 0\n\tJ1[1] = 0\n\tJ1[2] = 0\n\tJ1[3] = 0\n\tJ1[4] = 0\n\tJ1[5] = 0\n\t\"\"\" Jugador Dos \"\"\"\n\tJ2[0] = 0\n\tJ2[1] = 0\n\tJ2[2] = 0\n\tJ2[3] = 0\n\tJ2[4] = 0\n\tJ2[5] = 0\n\t\"\"\" Jugador Tres \"\"\"\n\tJ3[0] = 0\n\tJ3[1] = 0\n\tJ3[2] = 0\n\tJ3[3] = 0\n\tJ3[4] = 0\n\tJ3[5] = 0\n\t\"\"\" Jugador Cuatro \"\"\"\n\tJ4[0] = 0\n\tJ4[1] = 0\n\tJ4[2] = 0\n\tJ4[3] = 0\n\tJ4[4] = 0\n\tJ4[5] = 0", "def tenir_objet(self, nom_membre=None, objet=None):\n if nom_membre:\n membre = self.get_membre(nom_membre)\n else:\n membre = None\n for m in self.membres:\n if m.tenu is None and m.peut_tenir():\n membre = m\n break\n\n if membre is None:\n raise ValueError(\"aucun membre n'est disponible\")\n\n if membre.tenu:\n raise ValueError(\"le membre {} tient déjà l'objet {} \".format(\n nom_membre, membre.tenu))\n\n if objet is None:\n raise ValueError(\"l'objet passé en paramètre est None. Pour \" \\\n \"retirer un objet tenu, utilisez la méthode retirer_objet\")\n\n membre.tenu = objet\n objet.contenu = self.tenus", "def moi(self):\n\n pass", "def nouvellePartie(self):\n for i in range(0, 3):\n for j in range(0, 3):\n self.partie.uplateau[i, j].initialiser()\n self.canvas_uplateau[i, j].delete('pion')\n self.afficher_message(\" Nouvelle Partie\")", "def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, 
self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)", "def presenetCar():", "def lancement_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n number_of_ships: int):\n # Victoire devient True quand un joueur détruit tout les bateaux adverse\n victoire = False\n # PHASE 1 PLACEMENT BATEAU\n\n positionner_bateau(joueur1, number_of_ships)\n positionner_bateau(joueur2, number_of_ships)\n\n # PHASE 2 VERIFICATION DE L ETAT DES BATEAUX\n while not victoire:\n\n if number_of_ships == 3:\n petite_partie(joueur1, joueur2, tableau_invisible_joueur1, tableau_invisible_joueur2)\n\n elif number_of_ships == 1:\n test_partie(joueur1, joueur2, tableau_invisible_joueur1, tableau_invisible_joueur2)\n\n elif number_of_ships == 5:\n grande_partie(joueur1,\n joueur2,\n tableau_invisible_joueur1, tableau_invisible_joueur2,\n )\n\n if verif_win(joueur2, number_of_ships):\n victoire = True\n print(\"le joueur 1 a gagné\")\n\n if verif_win(joueur1, number_of_ships):\n victoire = True\n print(\"le joueur 2 a gagné\")\n\n envoi_score(joueur1, joueur2)\n afficher_score(joueur1, joueur2)", "def __init__(self,obj):\n self.nature_libelle = obj['NatureLibelle']\n self.ins_nom = obj['InsNom']\n self.ins_numero_install = obj['InsNumeroInstall']\n self.equipement_id = obj['EquipementId']", "def run_choucas(self):\n \n if self.dlg.cb_fondcarte.isChecked():\n # On affiche un fond de carte de la France par défaut\n # S'il n'est pas déjà afficher\n estAffiche = False\n for lyr in QgsMapLayerRegistry.instance().mapLayers().values():\n if lyr.name() == \"France\":\n estAffiche = True\n break \n if not estAffiche :\n uri = os.path.join(os.path.dirname(__file__) + str('/resources/fond_de_carte/france/'),'France.shp')\n #fond_path = ':/plugins/PluginChoucas/fond_de_carte/France.shp'\n \n vlayer = QgsVectorLayer(uri, \"France\", \"ogr\")\n # symbol = QgsMarkerSymbolV2.createSimple({'name': '', 'color': 'land' })\n # vlayer.rendererV2().setSymbol(symbol)\n QgsMapLayerRegistry.instance().addMapLayer(vlayer) \n \n \n # On recupere l'emprise selectionee\n selectedEmpriseIndex = self.dlg.comboEmprise.currentIndex()\n selectedEmprise = self.cleEmprise[selectedEmpriseIndex]\n tabCleEmprise = selectedEmprise.split(\"#\")\n \n if self.dlg.comboEmprise.count() == 0:\n typeemprise = ''\n codeemprise = ''\n else:\n typeemprise = tabCleEmprise[0]\n codeemprise = tabCleEmprise[1]\n \n \n # On recupere la source selectionee\n selectedSourceIndex = self.dlg.comboSources.currentIndex()\n selectedSource = self.list_sources[selectedSourceIndex]\n self.source = selectedSource\n \n # On recupere l entite selectionee\n selectedEntityIndex = self.dlg.comboEntities.currentIndex()\n selectedEntity = self.list_entities[selectedEntityIndex]\n self.entity = selectedEntity\n \n # On definit le proxy\n util.chargeProxy(self.dlg.cb_proxy)\n \n # On calcule la bbox\n # Ecrins\n projection = util.get_first_object(self.root, self.source, 'projection')\n empriseSelect = util.get_first_object(self.root, self.source, 'emprise')\n # print (\"emprise catalogue pour la source \" + self.source + \" = \" + empriseSelect)\n if empriseSelect == 'none':\n card = None\n else:\n card = bbox.getBbox(typeemprise, codeemprise, projection)\n\n style = 
{}\n style['couleur'] = util.get_first_object(self.root, self.source, 'couleur')\n style['svg'] = util.get_second_object(self.root, self.source, self.entity, 'svg')\n style['size'] = util.get_second_object(self.root, self.source, self.entity, 'size')\n style['forme'] = util.get_second_object(self.root, self.source, self.entity, 'forme')\n \n typeGeom = util.get_second_object(self.root, self.source, self.entity, 'geom')\n urlQGis = typeGeom + '?crs=' + projection\n\n # Si le mode choisi est [En ligne]\n if self.dlg.rb_online.isChecked():\n \n # On recupere dans le catalogue \n flux = util.get_first_object(self.root, self.source, 'flux')\n if flux == 'API':\n urlBrute = util.get_second_object(self.root, self.source, self.entity, 'url')\n else:\n urlBrute = ''\n \n attributs = util.get_attributs(self.root, self.source, self.entity)\n \n nomLayer = self.source + '-' + self.entity + '-' + codeemprise\n \n if self.source == 'osm':\n \n filtres = util.getFiltres(self.root, self.source, self.entity)\n osm.loadOSM(nomLayer, card, filtres, attributs, style)\n \n elif self.source == 'rando.ecrins-parcnational.fr' and typeGeom == 'Point':\n \n geotrek.loadPointEcrin(style)\n \n # QMessageBox.information(None, \"OUPS:\", \n # 'Departement non disponible pour cet API')\n else:\n if card == None:\n url = urlBrute\n else:\n # Par BBOX\n url = online.getUrlWithBBox(card, urlBrute)\n # On ajoute la couche\n online.displayLayer(url, urlQGis, self.source, nomLayer, typeGeom, attributs, style)\n \n # Si le mode choisi est [Hors ligne]\n if self.dlg.rb_offline.isChecked():\n offline.offline(self.root, self.source, self.entity)\n \n if self.dlg.rb_local.isChecked():\n \n urlBrute = util.get_second_object(self.root, self.source, self.entity, 'url')\n nomLayer = self.source + '-' + self.entity + '-' + codeemprise\n # On prend tous les attribts quand c'est un fichier\n local.loadPoint(nomLayer, urlBrute, urlQGis, card, style)", "def perimeter(self):", "def calculatePieces(self):\n pass;", "def obj(self) -> object:\n pass", "def get_objects_data(self):\n pass", "def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r", "def post_init(self):\n val = get_constant(\"pi_invention.pieces_a_joindre\")\n pieces_a_joindre = FieldSet(\"pieces_a_joindre\", \"Pièces à joindre\", [HTML(val)])\n self.fieldsets += [pieces_a_joindre]\n\n conditions = get_constant(\"pi_invention.conditions\")\n self.conditions = conditions", "def auto_rivet():\n sel_list = pm.ls(sl=1)\n\n # the last selection is the mesh\n objects = sel_list[:-1]\n geo = sel_list[-1]\n\n # get the closest point to the surface\n geo_shape = geo.getShape()\n\n follicles = []\n\n for obj in objects:\n # pivot point of the obj\n pivot = obj.getRotatePivot(space='world')\n uv = geo_shape.getUVAtPoint(pivot, space='world')\n\n # create a hair follicle\n follicle = pm.nt.Follicle()\n follicles.append(follicle)\n follicle.simulationMethod.set(0)\n geo_shape.worldMatrix >> follicle.inputWorldMatrix\n geo_shape.outMesh >> follicle.inputMesh\n follicle.parameterU.set(uv[0])\n follicle.parameterV.set(uv[1])\n\n # parent the object to the follicles transform node\n follicle_transform = follicle.getParent()\n\n follicle.outTranslate >> follicle_transform.translate\n follicle.outRotate >> follicle_transform.rotate\n\n pm.parent(obj, follicle_transform)\n\n return follicles", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if 
obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def afficherOBJ(self):\r\n str_abr = self.abr.afficher()## appel de l'affichage d'un ABR\r\n return str(self.debut)+':'+str(self.fin)+';'+str_abr ##concaténation du resultat\r", "def afficher(self, personnage, jeu, partie):\n en_main = jeu.en_main.get(personnage)\n tableau = jeu.tableau\n if en_main:\n msg = \"Dans votre main, vous avez {} et {}.\".format(\n en_main[0].nom_complet_indefini,\n en_main[1].nom_complet_indefini)\n else:\n msg = \"Vous n'avez encore rien dans votre main.\"\n \n if tableau:\n tableau = [piece.nom_complet_indefini for piece in tableau]\n aff_tableau = \", \".join(tableau[:-1]) + \" et \" + tableau[-1]\n msg += \"\\nSur le tableau se trouve {}.\".format(aff_tableau)\n\n if partie.tour is personnage:\n msg += \"\\nC'est votre tour.\"\n \n return msg", "def part_id(self):\n ...", "def part(self):\n return self._modelPart", "def __str__(self):\r\n return self.afficherOBJ()", "def test_data_object_vaporise(self):\n pass", "def other_object(img):\n return img[500:570, 1000:1070]", "def object_for(objectid):", "def properties(self):", "def properties(self):", "def properties(self):", "def _blob(self):\n self.__rewrite_sldIdLst()\n # # at least the following needs to be added before using\n # # _reltype_ordering again for Presentation\n # self.__rewrite_notesMasterIdLst()\n # self.__rewrite_handoutMasterIdLst()\n # self.__rewrite_sldMasterIdLst()\n return super(Presentation, self)._blob", "def get_obj():\n idPadre = request.args.get('id')\n nombrePadre = nombreObjeto(idPadre)\n global variablesActualizables\n variablesActualizables = -1\n print(\"consulta objetos id: {}, padre: {}\".format(idPadre,nombrePadre))\n\n try:\n objetos = g.db.query(Objetos).filter_by(NombreObjetoPadreObjeto=nombrePadre).filter_by(Activo=True).all()\n except NoResultFound:\n print \"NO resultados para el padre {}\".format(nombrePadre)\n objetos = []\n\n try:\n funciones = g.db.query(Funciones).filter_by(NombreObjetoPadreFuncion=idPadre).all()\n except NoResultFound:\n print \"NO funciones para el padre {}\".format(nombrePadre)\n funciones = []\n\n try:\n variables = g.db.query(Variables).filter_by(NombreObjetoPadreVariable=idPadre).all()\n except NoResultFound:\n print \"NO variables para el padre {}\".format(nombrePadre)\n variables = []\n if nombrePadre != None:\n Padre=\"Elementos de {}\".format(nombrePadre)\n else:\n Padre=\"Objetos Activos\"\n return render_template('show_objects.html',padre=Padre, objetos=objetos, funciones=funciones, variables=variables)", "def present(self):", "def fields(self):", "def __init__(self):\n mi_parqueo = list()", "def part_6():\n\n raise NotImplementedError", "def Item(self) -> object:", "def Item(self) -> object:", "def COMBI(self):\n\n self.FB()\n self.LAT()\n self.ROT()", "def __init__(self):\n self.nombre_roues = 4\n self.nombre_fauteils = 1\n self.moteur = False\n self.volant = True", "def __init__(self):\n self.tours = []\n self.grille = Grille()", "def get_object_to_run(self):", "def __init__(self, piece):\n self.piece = piece", "def afficher_carte(self):\n print(self)", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n 
obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n\n if profile == obj.club.secy:\n return True\n\n if profile in obj.club.joint_secy.all():\n return True\n\n if profile == obj.club.council.gensec:\n return True\n\n if profile in obj.club.council.joint_gensec.all():\n return True\n\n return False", "def get_mvts(self, plateau):\n if self.type == \"p\": #Pion\n if self.color == \"w\":\n diags = [[self.x-1, self.y+1],[self.x+1, self.y+1]] #Mouvements possibles de diagonales\n faces = [[self.x, self.y+1]] #Mouvements possibles de face\n if not self.moved: #Si le pion n'a pas encore bougé de la partie\n faces.append([self.x, self.y+2])\n else:\n diags = [[self.x-1, self.y-1], [self.x+1, self.y-1]]\n faces = [[self.x, self.y-1]] #Mouvements possibles de \n if not self.moved:\n faces.append([self.x, self.y-2])\n pos = [] #Position de déplacement validées\n for d in diags:\n if verif_case(d[0], d[1]): #Si la case est sur le plateau \n pion = plateau.get_pion(d[0],d[1])\n if pion != None and pion.color != self.color: #Si il y a un pion ennemi\n pos.append(d)\n for f in faces: \n if verif_case(f[0],f[1]):\n pion = plateau.get_pion(f[0], f[1])\n if pion == None: #Si il n'y a pas de pion\n pos.append(f)\n return pos\n elif self.type == \"t\": #Tour\n pos = []\n dir = [[1,0],[-1,0],[0,1],[0,-1]] #4 directions possibles\n for d in dir:\n x,y = self.x+d[0],self.y+d[1] #Projection de position\n while verif_case(x,y): #Tant que (x, y) est sur le plateau\n pion = plateau.get_pion(x, y)\n if pion != None: #Si il y a un pion\n if pion.color != self.color: #Si il n'est pas allié\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"c\": #Cavalier\n l = [-2,-1,1,2]\n mvts = [[x,y] for x in l for y in l if abs(x)!=abs(y)]\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([x, y])\n return pos\n elif self.type == \"f\": #Fou\n dir = [[1,1],[-1,1],[-1,-1],[1,-1]]\n pos = []\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != self.color:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"k\": #Roi\n mvts = [[1,0],[-1,1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]] #4 mouvements possibles\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x, y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([self.x + m[0], self.y + m[1]])\n return pos\n elif self.type == \"q\": #Dame\n pos = []\n dir = [[1,0],[1,-1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]]\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != joueur:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos", "def cliquer_sur_unité(self):", "def __init__(self,donnee_admin):\n self.ref = donnee_admin[0]\n self.date_saisie = donnee_admin[1]\n self.remplissage = donnee_admin[2]\n self.date_entree = donnee_admin[3]\n self.date_sortie = donnee_admin[4]", "def area(self):", "def 
actualise_fenetre(self,plateau,fenetre,joueur,info,bouton,etape_texte):\n\n self.affiche_plateau(plateau,fenetre)\n liste_im_joueur = [pygame.image.load(\"joueur\"+str(i)+\".png\").convert_alpha() for i in range(1,5)]\n for i in range (4) :\n x_joueur = 60\n y_joueur = 60\n liste_im_joueur[i] = pygame.transform.scale(liste_im_joueur[i], (int(x_joueur),int(y_joueur)))\n\n for i in range(len(plateau.dico_joueurs)) :\n fenetre.blit(liste_im_joueur[i],(1030,320+i*80))\n fenetre.blit(police_small.render(str(plateau.dico_joueurs[i].nom) + \" : \",False,pygame.Color(\"#000000\")),(800,340+i*75))\n fenetre.blit(police1.render(\"Score : \"+str(plateau.dico_joueurs[i].points),False,pygame.Color(\"#000000\")),(800,340+i*75+15))\n fenetre.blit(police1.render(\"Ordre de mission : \"+str(sorted(plateau.dico_joueurs[i].fantome_target)),False,pygame.Color(\"#000000\")),(800,340+i*75+30))\n fenetre.blit(police1.render(\"Jokers restants : \"+str(plateau.dico_joueurs[i].nb_joker),False,pygame.Color(\"#000000\")),(800,340+i*75+45))\n \n #test texte pour afficher le joueur qui joue\n if self.turn == True :\n fenetre.blit(police.render(\"C'est à vous de jouer\",False,pygame.Color(0,0,0)),(800,240))\n else:\n fenetre.blit(police.render(\"C'est le tour de votre adversaire\",False,pygame.Color(0,0,0)),(800,240))\n \n #affichage du message d'erreur\n for i in range(len(info)) : \n fenetre.blit(police.render(info[i],False,pygame.Color(\"#000000\")),(760,180+i*20))\n \n fenetre.blit(police.render(etape_texte,False,pygame.Color(\"#000000\")),(760,160))\n \n \n bouton.draw(fenetre)\n \n pygame.display.flip()", "def partid(self): # -> None:\n ...", "def extra_object_files(self):", "def chercherChemin(self):\n\n \n liste=self._circuit.vue(self.x,self.y,self.rayonVision)\n \n listeSuppr=[]\n couche_vehicule= self._circuit.Couche_vehicules\n \n for case in liste :\n #on élimine les cases infranchissbles les cases qui ne sont pas sur le chemin à suivre \n\n if self._circuit.numeroWayPoint(case[0],case[1])==0 or ( self._circuit.numeroWayPoint(self.x,self.y)!=self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])<= self._circuit.numeroWayPoint(self.x,self.y)) or( self._circuit.numeroWayPoint(case[0],case[1])>= 5*self._circuit.numeroWayPoint(self.x,self.y) and self._circuit.numeroWayPoint(self.x,self.y)!=0) or ( self._circuit.numeroWayPoint(self.x,self.y)==self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])== self._circuit.numeroWayPoint(self.x,self.y)) or self._circuit.plateau[case[1],case[0],couche_vehicule]!=None:#on élimine les points derrière\n \n listeSuppr.append(case)\n\n \n for case in listeSuppr:\n \n liste.remove(case)\n \n if len(liste)>=1:\n l=liste[0]\n\n for nour in liste :\n \n if distance((self.x,self.y),(l[0],l[1])) > distance((self.x,self.y),(nour[0],nour[1])):\n l=nour\n pasx=0\n pasy=0\n if self.x<l[0] : \n pasx=1\n elif self.x>l[0] :\n pasx=-1\n if self.y<l[1] : \n pasy=1\n elif self.y>l[1] :\n pasy=-1\n debug.dprint(\" id {} {}:({},{}) Waypoint {} Point:({},{}) WayPoint {} vitesse :{} reservoir:{}\".format(self.id,self.typeV,self.x,self.y,self._circuit.numeroWayPoint(self.x,self.y),l[0],l[1],self._circuit.numeroWayPoint(l[0],l[1]),self.vitesse,self.reservoir))\n self.orientation=atan2(pasy,pasx)\n\n self.vitesse=1\n\n debug.dprint(self) \n \n super().deplacer()\n \n\n self.rayonVision=4\n else :# on augemente le rayon de vision au cas ou toutes les cases sont occupées ou non franchissables\n self.rayonVision*=3", "def get(self, obj):", "def 
__sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary", "def test_data_object_get_details(self):\n pass", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n img_size = img.size\n img_size = (400,400)\n\n loader = loadjson\n \n data = loader(txt, self.objectsofinterest,img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float() \n\n if len(points_all) == 0:\n points_all = torch.zeros(1, 10, 2).double()\n \n # self.save == true assumes there is only \n # one object instance in the scene. \n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1,3).float()\n rotations = torch.zeros(1,4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name,'_camera_settings.json')\n with open(path_cam) as data_file: \n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3,3))\n matrix_camera[0,0] = cam['fx']\n matrix_camera[1,1] = cam['fy']\n matrix_camera[0,2] = cam['cx']\n matrix_camera[1,2] = cam['cy']\n matrix_camera[2,2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name,'_object_settings.json')\n with open(path_set) as data_file: \n data = json.load(data_file)\n\n cuboid = torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy() \n\n \n def Reproject(points,tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0]/2, img.size[1]/2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n \n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result 
= cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test: \n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n \n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key,(12, 115, 170),7) \n \n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1,point2],fill=lineColor,width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0]-pointRadius, point[1]-pointRadius, point[0]+pointRadius, point[1]+pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, which_color = 0, color = None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color \n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) #lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n \n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n \n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255,255,255), pointRadius = 3)\n DrawDot(points[1], pointColor=(0,0,0), pointRadius = 3)\n\n # Draw all the found objects. 
\n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n \n return {\n \"img\":img,\n \"translations\":translations,\n \"rot_quaternions\":rotations,\n 'pointsBelief':np.array(points_all[0]),\n 'matrix_camera':matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name':name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap(\n img, \n pointsBelief=pointsBelief,\n nbpoints = 9,\n sigma = self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg),beliefsImg[0].size(1),beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n \n\n # Create affinity maps\n scale = 8\n if min (img.size) / 8.0 != min (img_size)/8.0:\n # print (scale)\n scale = min (img.size)/(min (img_size)/8.0)\n\n affinities = GenerateMapAffinity(img,8,pointsBelief,objects_centroid,scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0]+1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1]+1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0],self.normal[0],self.normal[0]),\n (self.normal[1],self.normal[1],self.normal[1])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n \n img = crop(img,h_crop,w_crop,img_size[1],img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop/8)\n h_crop = int(h_crop/8)\n\n affinities = affinities[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n beliefs = beliefs[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,1,50)],dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,50,1)],dim=2)\n\n return {\n 'img':img, \n \"affinities\":affinities, \n 'beliefs':beliefs,\n }", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - Main', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A1-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'A1-01-C - Location', 190))\n self.assertIsInstance(self.m_pyhouse_obj, PyHouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House.Location, LocationInformationPrivate)", "def __init__(self, altura, peso, edad):\n\t\tself.altura = altura # OJO TODAS LAS VARIABLES SON PUBLICAS \n\t\tself.peso = peso \n\t\tself.edad = edad\n\t\tself.profesion = \"\" # esta la inicializamos nosotros\n\t\tself.lista_tareas = []\n\t\tself.__privado = 1 # este atributo es privado no podemos acceder a el desde fuera", "def nextIntersectors(self, inter):\n assert self.poly1.pnFacesInPoly() and self.poly2.pnFacesInPoly()\n otherFInters = 
self.getIntersectorList(inter.f.vertices)\n # First intersector\n pInters = self.getIntersectorList(inter.pe.pFace.vertices)\n otherI1 = next(filter(lambda x: x is not None and x.f == inter.f and x != inter,\n pInters),\n None)\n if otherI1 is None:\n # The pFace does not intersect inter.f a second time,\n # looking for a place where inter.f intersects the pFace\n otherI1 = next(filter(lambda x: x is not None and x.f == inter.pe.pFace,\n otherFInters),\n None)\n if otherI1 is None:\n # polyhedron(inter.f.vertices + inter.pe.pFace.vertices,\n # inter.pe.pFace.edges() + inter.f.edges(),\n # [inter.f, inter.pe.pFace]).plot(True, col=('none', 'k', 'r'))\n # # print(inter.f, '\\n\\n', inter.pe.pFace)\n assert all(v in self.poly1.vertices for v in inter.f.vertices) or all(v in self.poly2.vertices for v in inter.f.vertices)\n assert self.poly1.facesInVertices() and self.poly2.facesInVertices()\n assert self.poly1.pnFacesInPoly() and self.poly2.pnFacesInPoly()\n assert self.poly1.nonDoubleVertices() and self.poly2.nonDoubleVertices()\n # # print('\\n', [(i, i.f) for i in filter(lambda x: x is not False, pInters)])\n # # print([sum(min(v.dist(v2) for v2 in inter.f.vertices) for v in i.f.vertices) for i in filter(lambda x: x is not False, pInters)])\n # # print('\\n', [(i, i.f) for i in filter(lambda x: x is not False, otherFInters)])\n # # print([sum(min(v.dist(v2) for v2 in inter.pe.pFace.vertices) for v in i.f.vertices) for i in filter(lambda x: x is not False, otherFInters)])\n raise ValueError('No intersector found')\n # Second intersector\n nInters = self.getIntersectorList(inter.pe.nFace.vertices)\n otherI2 = next(filter(lambda x: x is not None and x.f == inter.f and x != inter,\n nInters),\n None)\n if otherI2 is None:\n # The nFace does not intersect inter.f a second time,\n # looking for a place where inter.f intersects the nFace\n otherI2 = next(filter(lambda x: x is not None and x.f == inter.pe.nFace,\n otherFInters),\n None)\n if otherI2 is None:\n polyhedron(inter.f.vertices + inter.pe.nFace.vertices,\n inter.pe.nFace.edges() + inter.f.edges(),\n [inter.f, inter.pe.nFace]).plot(True, col=('none', 'k', 'r'))\n # # print(inter.f, inter.pe.pFace)\n raise ValueError('No intersector found')\n inter.adjacents = (otherI1, otherI2)\n return (otherI1, otherI2)", "def extract(self):\n pass", "def objets_uniques(self):\n objets = []\n for membre in self.membres:\n for objet in membre.equipe:\n if objet.unique:\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n if membre.tenu and membre.tenu.unique:\n objet = membre.tenu\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n\n return objets", "def item_perceel_crab_adapter(obj, request):\n return {\n 'id': obj.id,\n 'centroid': obj.centroid,\n 'postadressen': obj.postadressen,\n 'metadata': {\n 'begin_tijd': obj.metadata.begin_tijd,\n 'begin_datum': obj.metadata.begin_datum,\n 'begin_bewerking': {\n 'id': obj.metadata.begin_bewerking.id,\n 'naam': obj.metadata.begin_bewerking.naam,\n 'definitie': obj.metadata.begin_bewerking.definitie\n },\n 'begin_organisatie': {\n 'id': obj.metadata.begin_organisatie.id,\n 'naam': obj.metadata.begin_organisatie.naam,\n 'definitie': obj.metadata.begin_organisatie.definitie\n }\n }\n }", "def test_nbands_vrt_object(self):\n self.assertEqual(_test_object(landsat_vrt)[0], 2)", "def __init__(self, realpart, imagpart):\n self.r = realpart\n self.i = imagpart", "def CL(self):", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if 
type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def partid(self): # -> Unknown:\n ...", "def _addPredicate(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n obj = None\n aux = list()\n auxlabel = \"\"\n # 1st round find absolute subject & object\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict:\n sub = child\n elif child.func in ObjDict:\n obj = child\n\n # 2nd round find potential subject & object with aux.\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict or child.func in ObjDict:\n continue\n elif child.func in ObjPostDict:\n if not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in SubPassiveObjDict:\n if parent.passive == 1:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in ObjPassiveSubDict:\n if parent.passive == 1:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n\n if parent.passive == 0:\n # Add parent and subject.\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif sub:\n # parent.main = \"<{0}>[NONE]{1}\".format(sub.main, parent.main)\n # elif obj:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, obj.main)\n if sub:\n parent.main = \"<{0}>{1}\".format(sub.main, parent.main)\n self._addNode(parent, sub=sub.main)\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(sub.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add object.\n if obj:\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(parent.main, obj.main, label=\"客体\\n\" + auxlabel, etype=\"obj\")\n else:\n # Add obj as sub\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif obj:\n # parent.main = \"<{0}>[NONE]{1}\".format(obj.main, parent.main)\n # elif sub:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, sub.main)\n if obj:\n parent.main = \"<{0}>{1}\".format(obj.main, parent.main)\n self._addNode(parent, sub=obj.main)\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(obj.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add sub as obj\n if sub:\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(parent.main, sub.main, label=\"客体\\n\", etype=\"obj\")\n # # Add obj as aux.\n # if obj:\n # aux.append(obj.id)\n # auxlabel += \"[{0}]\\n\".format(obj.surface)\n self._processAux(aux, parent.main, chunks)", "def 
writeGroundPlane(self,obj,renderer):\n\n result = \"\"\n bbox = FreeCAD.BoundBox()\n for view in obj.Group:\n if view.Source and hasattr(view.Source,\"Shape\") and hasattr(view.Source.Shape,\"BoundBox\"):\n bbox.add(view.Source.Shape.BoundBox)\n if bbox.isValid():\n import Part\n margin = bbox.DiagonalLength/2\n p1 = FreeCAD.Vector(bbox.XMin-margin,bbox.YMin-margin,0)\n p2 = FreeCAD.Vector(bbox.XMax+margin,bbox.YMin-margin,0)\n p3 = FreeCAD.Vector(bbox.XMax+margin,bbox.YMax+margin,0)\n p4 = FreeCAD.Vector(bbox.XMin-margin,bbox.YMax+margin,0)\n\n # create temporary object. We do this to keep the renderers code as simple as possible:\n # they only need to deal with one type of object: RenderView objects\n dummy1 = FreeCAD.ActiveDocument.addObject(\"Part::Feature\",\"renderdummy1\")\n dummy1.Shape = Part.Face(Part.makePolygon([p1,p2,p3,p4,p1]))\n dummy2 = FreeCAD.ActiveDocument.addObject(\"App::FeaturePython\",\"renderdummy2\")\n View(dummy2)\n dummy2.Source = dummy1\n ViewProviderView(dummy2.ViewObject)\n FreeCAD.ActiveDocument.recompute()\n\n result = self.writeObject(dummy2,renderer)\n\n # remove temp objects\n FreeCAD.ActiveDocument.removeObject(dummy2.Name)\n FreeCAD.ActiveDocument.removeObject(dummy1.Name)\n FreeCAD.ActiveDocument.recompute()\n\n return result", "def test_api_object_partial_property(self, api_object):\n api_object.status = 'PARTIAL'\n assert api_object.partial\n assert not api_object.creating", "def addReco(obj1,obj2):\n px = obj1.px + obj2.px\n py = obj1.py + obj2.py\n pz = obj1.pz + obj2.pz\n E = obj1.E + obj2.E\n return Reco(px,py,pz,E)", "def reckon(self):", "def __init__(self, diccionario):\n self.numero = diccionario['numero']\n self.nombre = diccionario['equipo_nombre']\n self.pokmov = lectores.pokemon_y_movimiento_a_tuplas(diccionario)", "def is_full(self):", "def get_product_area(self, obj):\n\t\treturn obj.product.product", "def get_object(self, img):\n objectType = 'none'\n colourProb = np.ones((3,)) / 3.0\n distance = 0.0\n angle = 0.0\n self.patternFound = False\n \n #patternFound, corners = self.get_corners(img)\n self.get_corners(img)\n \n if (self.patternFound):\n \n # Determine if the object is horizontal or vertical\n delta_x = abs(self.corners[0,0,0] - self.corners[2,0,0])\n delta_y = abs(self.corners[0,0,1] - self.corners[2,0,1])\n horizontal = (delta_y > delta_x)\n if (horizontal):\n objectType = 'horizontal'\n else:\n objectType = 'vertical'\n \n # Compute distances and angles\n if (horizontal):\n height = ((abs (self.corners [0,0,1] - self.corners [2,0,1]) +\n abs (self.corners [9,0,1] - self.corners [11,0,1])) / 2.0);\n \n patternHeight = (self.patternSize[0]-1.0) * self.patternUnit\n \n else:\n height = (abs (self.corners [0,0,1] - self.corners [9,0,1]) +\n abs (self.corners [2,0,1] - self.corners [11,0,1])) / 2.0;\n \n patternHeight = (self.patternSize[1]-1.0) * self.patternUnit\n \n #distance = 1.0 / (0.0001 * height);\n distance = self.intrinsic_matrix[1,1] * patternHeight / (height * 10.0) \n\n center = (self.corners [0,0,0] + self.corners [2,0,0] +\n self.corners [9,0,0] + self.corners [11,0,0]) / 4.0;\n \n #angle = 0.0018 * center - 0.6425;\n #angle *= -1.0;\n angle = -np.arctan2(center - self.intrinsic_matrix[0,2], self.intrinsic_matrix[0,0])\n \n \n #### Classify object by colour\n \n # Extract rectangle corners\n points = np.array(\n [self.corners [0],\n self.corners [2],\n self.corners [9],\n self.corners [11]]\n )\n \n points.shape = (4,2)\n points = np.int32(points)\n\n # Compute region of interest\n mask = 
np.zeros((self.imageSize[1], self.imageSize[0]), dtype =np.uint8) \n cv2.fillConvexPoly (mask, points, (255, 255, 255)) \n \n # Compute mean colour inside region of interest\n mean_colour = cv2.mean(img, mask);\n \n red = mean_colour[2];\n green = mean_colour[1];\n blue = mean_colour[0];\n sum = red + green + blue;\n\n colourProb[0] = red / sum;\n colourProb[1] = green / sum;\n colourProb[2] = blue / sum;\n \n \n return objectType, distance, angle, colourProb", "def partid(self): # -> int:\n ...", "def get_object(id):", "def formulate_plan(self):\n return NotImplemented", "def fields(self):\n ...", "def _publish_objects(self):\n\n for obj in self._cozmo.world.visible_objects:\n now = rospy.Time.now()\n x = obj.pose.position.x * 0.001\n y = obj.pose.position.y * 0.001\n z = obj.pose.position.z * 0.001\n q = (obj.pose.rotation.q1, obj.pose.rotation.q2, obj.pose.rotation.q3, obj.pose.rotation.q0)\n self._tfb.send_transform(\n (x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame\n )\n \n try:\n if obj.cube_id and self.target_cube != obj:\n self._tfb.send_transform((x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame)\n print(\"Found {}\".format(obj.cube_id))\n if not self.cube_found and self.robots_distance_to_object(self._cozmo, obj) < 400:\n self.target_cube = obj\n self.cube_found = True\n print(\"Locking on to {}\".format(obj.cube_id))\n else:\n if self.cube_found:\n print(\"Found that one already!\")\n else:\n print(\"Cube too far away!\")\n \n except:\n # print('OBJECT IS NOT A LIGHT CUBE')\n if(obj==self._cozmo.world.charger):\n return\n if(obj.object_type==CustomObjectTypes.CustomType00 and (self.front_wall_pose == None or not self.front_wall_pose.is_accurate)):\n self.front_wall_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Front', self._odom_frame)\n print('*** Comzmo has found the front wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType01 and (self.ramp_bottom_pose == None or not self.ramp_bottom_pose.is_accurate)):\n self.ramp_bottom_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Ramp', self._odom_frame)\n print('*** Comzmo has found the front wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType03 and (self.drop_spot_pose == None or not self.drop_spot_pose.is_accurate)):\n self.drop_spot_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Drop', self._odom_frame)\n print('*** Comzmo has found the drop Spot! ***')\n if(obj.object_type==CustomObjectTypes.CustomType04 and (self.back_wall_pose == None or not self.back_wall_pose.is_accurate)):\n self.back_wall_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Back', self._odom_frame)\n print('*** Comzmo has found the back wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType05 and (self.drop_target_pose == None or not self.drop_target_pose.is_accurate)):\n self.drop_target_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Target', self._odom_frame)\n print('*** Comzmo has found the Dropt Target! ***')\n if(obj.object_type==CustomObjectTypes.CustomType06 and (self.drop_clue_pose == None or not self.drop_clue_pose.is_accurate)):\n self.drop_clue_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Clue', self._odom_frame)\n print('*** Comzmo has found the Dropt Clue! 
***')", "def representation(self) -> DrawingObjects:\n pass", "def area(self):\n ...", "def test_get_composition(self):\n pass", "def __init__(self, marqueur, allele, hauteur, informatif):\n\n self.marqueur = marqueur\n self.allele = allele\n self.hauteur = hauteur\n self.informatif = informatif", "def get_representative_data_object(self, obj):\n if self.dim == 0:\n # In this way, obj can be a data object and this class can be\n # used even if the assignment is not between \"flattened components\"\n return obj\n else:\n nominal_index = self.nominal_index\n return obj[nominal_index]" ]
[ "0.6027109", "0.5971558", "0.58855444", "0.58450836", "0.5830027", "0.58068246", "0.5640876", "0.55412936", "0.55015695", "0.5419981", "0.53639406", "0.53315765", "0.5311751", "0.5300423", "0.5263753", "0.5260159", "0.52363807", "0.5175996", "0.51674426", "0.5153223", "0.5149583", "0.51494414", "0.50971997", "0.5079156", "0.50747925", "0.50649875", "0.505392", "0.5053067", "0.5029702", "0.5021705", "0.5011506", "0.5011506", "0.4998115", "0.49938592", "0.49812505", "0.49807793", "0.49649987", "0.49637502", "0.49522346", "0.4944624", "0.49399456", "0.49399456", "0.49399456", "0.4934664", "0.49281743", "0.49186286", "0.49087396", "0.4901409", "0.4888369", "0.4846917", "0.4846917", "0.48448032", "0.4843826", "0.48354828", "0.48310697", "0.48243496", "0.48227638", "0.48138115", "0.48136762", "0.4809829", "0.4806924", "0.47989476", "0.4796737", "0.47950587", "0.47945714", "0.47934988", "0.47900578", "0.4787772", "0.4786216", "0.47845063", "0.4779746", "0.47734818", "0.47689453", "0.476456", "0.47634026", "0.4762088", "0.47588068", "0.47463885", "0.47438294", "0.4741544", "0.47409165", "0.47400483", "0.47369465", "0.4734705", "0.47242036", "0.47197634", "0.47167602", "0.4715259", "0.47144356", "0.47132477", "0.47094613", "0.47082484", "0.47052252", "0.4703704", "0.47030944", "0.47029427", "0.46986896", "0.4693263", "0.46922907", "0.4691869", "0.4687016" ]
0.0
-1
Saves table's data source to given path
def save(self, export_path: str):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(file, table):\n pq.write_table(pa.Table.from_pandas(table), file)", "def save(self):\r\n debug.write(\"[SourceRPG] Handling SQL Save\", 1)\r\n if self.path != \":memory:\":\r\n debug.write(\"Path is not in memory\", 2, False)\r\n if currentTurboMode is False:\r\n debug.write(\"We are not in turbo mode\", 2, False)\r\n self.connection.commit()\r\n debug.write(\"[SourceRPG] SQL Save handled\", 1)", "def save(self, labpath: str) -> None:\n self._table.to_csv(labpath, index=False)\n print(\"# Save experimental data into {0}\".format(labpath))", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n pass", "def write_table(table, file_path):\n\n\twith open(file_path, 'w') as file:\n\t\tfile.write(table)", "def save_table(date, table):\n if os.path.isfile(date+\".table\"):\n file_using = open(date+\".table\", \"w\")\n else:\n return False\n file_using.seek(0)\n file_using.truncate()\n for line in table:\n file_using.write(\"{},{},{},{},{}\\n\".format(line[0], line[1], line[2], line[3], line[4]))\n file_using.close()", "def save(self, path: str):\n pass", "def export_database(self):\n base_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='CSV (*.csv)')\n database.export_to_csv(DB_PATH, base_path[0])", "def save_dataloader(self, path: Union[str, Path]) -> None:\n if isinstance(path, str):\n path = Path(path)\n joblib.dump(self, path)", "def save_table(data, out_file):\n logging.info(\"Saving table\")\n #header, data = data\n #out = pd.DataFrame(data=data, columns = header.keys())\n joblib.dump(data, out_file)", "def saveData(self):\n\n\n path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', os.getcwd())\n\n if path[0] != '':\n\n filepath, filename = os.path.split(path[0])\n\n if os.path.exists(filepath):\n\n self.getCurrentPanda().saveData(path[0])", "def save(path_to_model):\n pass", "def store_hive_table(data, directory, file_name):\n table_name = directory + \".\" + file_name\n data.write.saveAsTable(table_name)", "def save_indicator(table, target_path, var_name, geo):\n table.to_csv(f\"{target_path}/{var_name}.{geo}.csv\", index=False)", "def save_model(self, path):\n pass", "def save(df, path):\n \n # Extract the directory and filename from the given path\n directory = os.path.split(path)[0]\n filename = os.path.split(path)[1]\n if directory == '':\n directory = '.'\n \n # If the directory does not exist, create it\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n # The final path to save to\n savepath = os.path.join(directory, filename)\n \n # Save the dataset\n sampled_frame.to_csv(savepath, index=False)", "def save_path(self):\n raise NotImplementedError", "def save_path(self):\n raise NotImplementedError", "def save_db(self) -> None:", "def save():", "def save(self):\n self.backend.save(list(self._d.items()))\n log.debug(\"save: {}\".format(self.backend.filename))", "def save_table_scraperwiki(uniques,table,name):\n for row in table:\n scraperwiki.sqlite.save(\n unique_keys=uniques\n , data=row\n , table_name=name\n )", "def save_datatable(datatable: ModelDatatable, folder_name):\n result = {RESULT_CODE_KEY: 0,\n RESULT_MESSAGE_KEY: \"success save datatable: \" + datatable.get_datatable_name()}\n\n # create folder if needed\n Path(folder_name).mkdir(parents=True, exist_ok=True)\n\n # check if file with datatable_name already existing and delete if existing\n path_to_file = folder_name + \"/\" + datatable.get_datatable_name()\n path = Path(path_to_file)\n if path.is_file():\n # remove file\n 
os.remove(path_to_file)\n\n try:\n # connect to database\n connection = sqlite3.connect(path_to_file)\n logger_log(\"established db connection. \" + \"sqlite3.version: \" + sqlite3.version)\n\n # create\n result = create_datable(connection, datatable, folder_name)\n # if not successful\n if not result[RESULT_CODE_KEY] == 0:\n return result\n\n #\n # insert data\n #\n logger_log(\"now try insert data into datatable with name: \" + datatable.get_datatable_name())\n\n # for all rows\n for row in datatable.get_rows():\n # get sql statement\n sql_statement = get_sql_statement_insert_data_for_row(row)\n # get cursor\n cursor = connection.cursor()\n # execute\n cursor.execute(sql_statement)\n logger_log(\"success insert row.\")\n\n result = {RESULT_CODE_KEY: 0,\n RESULT_MESSAGE_KEY: \"success save datatable: \" + datatable.get_datatable_name()}\n except Error as e:\n logger_log(\"failed. db operation with error: \" + e.args[0])\n result[RESULT_CODE_KEY] = -1\n result[RESULT_MESSAGE_KEY] = \"failed saving datatable. error: \" + e.args[0]\n finally:\n if connection:\n connection.commit()\n connection.close()\n logger_log(\"db connection closed: \" + datatable.get_datatable_name())\n logger_log(result[RESULT_MESSAGE_KEY])\n return result", "def write (self, path):\n\t\ts=[]; add=s.append\n\t\tadd ('\\t'.join (self.schema))\n\t\tfor record in self.data:\n\t\t\tadd (record.asTabDelimitedRecord())\n\t\t\n\t\t# f = open (path, 'w')\n\t\tf = codecs.open(path, 'w', 'utf-8')\n\t\tf.write (self.linesep.join (s))\n\t\tf.close()\n\t\tprint (\"data written to \" + path)", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def save_data(df, database_filename):\n engine = create_engine('sqlite:///' +database_filename)\n df.to_sql('Project2', engine, index=False)", "def do_save(self, arg):\n # If no data source selected, prompt user to do so.\n try:\n self._shw.save_data ()\n except ValueError as e:\n View.info (e)\n except (OSError, AttributeError) as e:\n View.error (e)\n except Exception as e:\n View.error (e)\n else:\n View.success (\"Data is saved\")", "def check_save_table_path(self, path):\r\n\r\n #check if path is empty and try to get something not empty\r\n if path == \"\":\r\n path = QFileDialog.getSaveFileName(self,caption = 'select a file to save to',\r\n directory = self.ui.saveTablePath.text(),\r\n filter=\"Text files (*.txt);;All files (*)\",\r\n options = QFileDialog.DontConfirmOverwrite)\r\n \r\n #in pyqt5 a tuple is returned, unpack it\r\n if os.environ['QT_API'] == 'pyqt5':\r\n path, _ = path\r\n \r\n if path == \"\":\r\n return \"\"\r\n else:\r\n self.ui.saveTablePath.setText(path)\r\n \r\n #do nothing if path already checked\r\n if self.saveTablePath == path:\r\n return path\r\n \r\n #if it already exists ask to overwrite or append\r\n if os.path.isfile(path):\r\n title = \"path already exists\"\r\n text = (path+\" already exists. Should it be overwritten? \\n\"\r\n + \"No will append to the file. \"\r\n + \"Choose Abort to select another file\")\r\n overwriteAnswer = QMessageBox.question(self,title,text,\r\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Abort,\r\n QMessageBox.No)\r\n #if selected no, append to the file, i.e. 
return the path as checked\r\n if overwriteAnswer == QMessageBox.No:\r\n return path\r\n #empty path return on abort\r\n elif overwriteAnswer == QMessageBox.Abort:\r\n return \"\"\r\n #with yes the method continues and will overwrite the file\r\n \r\n #should not be a dir\r\n elif os.path.isdir(path):\r\n logging.error(\"specified path is a directory\")\r\n return \"\"\r\n \r\n #in all other cases, open it for writing and write a header to it\r\n headerString = (\"film no\\t\" \"filename\\t\" \"x0\\ty0\\tx1\\ty1\\t\"\r\n \"number of pixels\\t\" \r\n \"R_avg\\tR_std\\t\" \"G_avg\\tG_std\\t\" \"B_avg\\tB_std\\n\")\r\n with open(path,\"w\") as saveTableFile:\r\n saveTableFile.write(headerString)\r\n \r\n return path", "def save(self,path=\"qtable.npy\"):\n np.save(path, self.Q)", "def save_data(df, database_filename):\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(\"YourTableName\", engine, index=False, if_exists=\"replace\")", "def save(\n self,\n modelSavePath\n ):\n pass", "def save_data(self):\n pass", "def _save(self, dataset, path, files, copy_files=False):\n raise NotImplementedError('Loader {} does not support saving datasets.'.format(self.type()))", "def save_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.saveEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()", "def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()", "def save(self, file_path):\n self.model.save(file_path)", "def save(self, file_path):\n self.model.save(file_path)", "def save(self, file_path):\n self.model.save(file_path)", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))", "def changeDataPath(self,path):\n self.dataPath = path", "def _save_to_file_ctrl(self):\n fname, _ = QtWidgets.QFileDialog.getSaveFileName(\n parent=self,\n caption=\"Save proxies to file\",\n filter=\"csv file (*.csv)\"\n )\n if fname:\n with open(fname, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow([\"Address\", \"Port\", \"Protocol\", \"Anonymity\"])\n for proxy in self._get_table():\n writer.writerow([\n 
proxy.address,\n proxy.port,\n proxy.protocol,\n proxy.anonymity\n ])", "def save_dataset_csv(self, path):\n cols = list(self.data_dict.keys())\n df = pd.DataFrame(self.data_dict, index=None, columns=cols)\n df.to_csv(path, index=True)", "def store(self, filename):", "def save_to_database(filename,key, df, metadata = {}):\n\t# Opening the dataframe\n\tstore = pd.HDFStore(filename)\n\t# Feeding the dataframe, 't' means table format (slightly slower but can be modified)\n\tstore.put(key, df, format=\"t\")\n\t# feeding the metadata\n\tstore.get_storer(key).attrs.metadata = metadata\n\t# /!\\ Important to properly close the file\n\tstore.close()", "def save_dataframe(state: State):\n\n try:\n state.games.df.to_csv(ROOT_PATH + \"/results/data/raw_data.csv\")\n LOGGER.debug(\"Successfully saved data in ../results/data/\")\n\n except Exception as e:\n LOGGER.error(f\"Could not save dataframe file - {e}\")", "def save(self, fname):\n pass", "def save(self, path):\n for i, m in enumerate(self.model_save):\n m.save(os.path.join(path, str(i) + \"-\" + m.name))", "def save(self, path_to_save):\n for item in self.data_array:\n item.save(path_to_save+item.file_name)", "def save():\n pass", "def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)", "def saveData(self):\n pass", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def save_inventory(file_name, table):\r\n with open(file_name, 'w') as objFile:\r\n for cd in table:\r\n objFile.write(cd.saveFormat())\r\n return table", "def save(self, data, outpath):\n data.to_csv(outpath)", "def execute(cls, path: Path) -> None:\n file_csv_convert = FileCsvConvert.create_by_path_csv_convert(path)\n list_convert_table = cls._load_csv(file_csv_convert, path)\n file_csv_convert.value.convert_table_type.value.model.save_all(list_convert_table)", "def save(self, path):\n torch.save(\n {\n \"input_dimension\": self.input_dimension,\n \"quantiles\": self.quantiles,\n \"width\": self.width,\n \"depth\": self.depth,\n \"activation\": self.activation,\n \"network_state\": self.state_dict(),\n \"optimizer_state\": self.optimizer.state_dict(),\n },\n path,\n )", "def save_data(df: pd.DataFrame, database_filename: str) -> None:\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(Path(database_filename).stem, engine, index=False, if_exists=\"replace\")", "def save_json_file():\n global output_on_display, import_lst, column_names, data, new_data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = new_data\n step = len(column_names)\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"JSON\", \"*.json\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, 
defaultextension='.json')\n data = import_lst\n\n if len(data[0]) == step:\n pass\n else:\n data = import_lst[step::]\n\n data2 = list(map(list, zip(*data)))\n\n data3 = {key: value for key, value in zip(column_names, data2)}\n\n column = list(data3.keys())\n\n df = pd.DataFrame(data3, columns=column)\n\n data_dict = df.to_dict(orient=\"records\")\n with open(save_name, \"w+\") as f:\n json.dump(data_dict, f, indent=4)\n\n data.clear()\n data2.clear()\n data3.clear()", "def on_save_as(self, event):\n data = self._get_data_selection(event)\n # path = None\n default_name = data.name\n if default_name.count('.') > 0:\n default_name = default_name.split('.')[0]\n default_name += \"_out\"\n if self.parent is not None:\n if issubclass(data.__class__, Data1D):\n self.parent.save_data1d(data, default_name)\n elif issubclass(data.__class__, Data2D):\n self.parent.save_data2d(data, default_name)\n else:\n print(\"unable to save this type of data\")", "def save(self, path):\n torch.save(self, path)", "def save(self, path):\n torch.save(self, path)", "def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()", "def __write_source(self, handle, nbr):\n try:\n source = self.database.get_source_from_handle(handle)\n self.__write_row(nbr, handle, source)\n except:\n source = \"NOT FOUND\"\n self.__write_row(nbr, handle, source)", "def SaveData(self, conn):\n t = time.process_time()\n log = Logger()\n cursor = conn.cursor()\n sqlQuery = \"insert into old_data \" \\\n \"select * from new_data\"\n cursor.execute(sqlQuery)\n conn.commit()\n conn.close()\n elapsed_t = time.process_time() - t\n log.log('Datos de cambios copiados a tabla', elapsed_t)", "def store_to_file(self, file_type, path):\n if not isinstance(self.dataframe, pd.DataFrame):\n raise ValueError('dataframe argument is not a Pandas.DataFrame')\n elif file_type.lower() not in ['csv', 'pickle']:\n raise ValueError('File_type arg must be either pickle or csv.')\n elif file_type.lower() == 'pickle':\n self.sort_df()\n self.dataframe.to_pickle(path=path)\n else:\n self.sort_df()\n self.dataframe.to_csv(path_or_buf=path)\n self.path = path\n return self.path", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def save_model(self, model_path: str):", "def changeModelSavePath(self,path):\n self.modelPath = path", "def save(self):\r\n self.df_app_data = self.df_app_data.to_csv(\"app_data.csv\", index=False)", "def save_data(df, database_filepath):\n # create a database connect\n conn = sqlite3.connect(database_filepath)\n # replace .db with empty space for new table name\n table_name = database_filepath.replace('.db', '')\n \n return df.to_sql(table_name, con=conn, if_exists='replace', index=False)", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def 
save(self):\n assert self.data is not None\n with open(self._csv_path, mode=\"w\", encoding=\"utf-8\") as spp_file:\n # We don't want to save the index, as it's not especially meaningful, and makes life harder when trying to\n # restore the binary version from the csv (the index column would be imported and then need to be dropped).\n self.data.to_csv(spp_file, index=False)", "def changeResultsSavePath(self,path):\n self.resultsPath = path", "def write_df_to_db(df, db_path):\n print \"Writing to 'results' table in db: \", db_path\n conn = sqlite3.connect(db_path)\n df.to_sql(\"results\", con=conn,if_exists='replace')", "def save_GRID( self , filename ):\n self._fwrite_GRID( filename )", "def write_to(self, fname, **kwargs):\n data = self.to_Table()\n data.write(fname, **kwargs)", "def save(self, directory):\n pass # pragma: no cover", "def save(self, path: str):\n torch.save(self, path)", "def set_master_table(filepath):\n my_globals['master_table_path'] = filepath\n my_globals['master_table_data'] = None", "def save_table_cache(table, tracer_id, model, table_name, verbose=True):\n check_cache_path(model, verbose=verbose)\n filepath = paths.tracer_cache_filepath(tracer_id, model, table_name=table_name)\n printv(f'Saving table to cache: {filepath}', verbose)\n table.to_pickle(filepath)", "def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)", "def save(self, filename):\n pass", "def save(self, file_prefix, first_colname=\"\", **kwargs):\n\n write_refset(self, file_prefix, first_colname=first_colname, **kwargs)", "def save(self, **kwargs):\n if len(self.path) > 0:\n self.path = self.path.strip().rstrip()\n super(TargetPath, self).save(**kwargs)", "def save_model(self, filename):\r\n pass", "def save(self, path: str):\n torch.save(self.model.state_dict(), path)", "def save(self, path: str):\n torch.save(self.model.state_dict(), path)", "def save_dataset(self):\n if self.res_dataset is None:\n return\n if self.write_path is None:\n raise Exception(\"Error: Attempted to save result dataset without ever specifiying a path to write to\")\n\n if self.format == \"arrow\":\n self.res_dataset.save_to_disk(self.write_path)\n elif self.format == \"csv\":\n self.res_dataset.to_csv(self.write_path, index = False)", "def psql_saver(spark, df, tbname, savemode='error'):\n df.createOrReplaceTempView(\"view\")\n spark.sql('''SELECT * FROM view''').write \\\n .format('jdbc') \\\n .option('url', 'jdbc:postgresql://%s' % __credential__.jdbc_accessible_host_psql) \\\n .option('dbtable', tbname) \\\n .option('user', __credential__.user_psql) \\\n .option('password', __credential__.password_psql) \\\n .mode(savemode) \\\n .save()", "def save_dst_to_file(dst, dir_file):\n dst = dst.sort_values('event')\n store = pd.HDFStore(dir_file, \"w\", complib=str(\"zlib\"), complevel=4)\n store.put('dataframe', dst, format='table', data_columns=True)\n store.close()", "def save(self):\n # TODO (Pierre): code", "def save_data(df, database_filename):\n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('disasterdata', engine, index=False)", "def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)", "def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)", "def saveData(self):\n pdIds = pd.DataFrame.from_dict(self.pathIds, orient='index')\n pdCrr = pd.DataFrame.from_dict(self.pathCrr, orient='index', columns=['cid'])\n mergedData = 
pd.concat([pdIds, pdCrr['cid']], axis=1, ignore_index=False)\n\n # Create the save dialog box\n name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File',\n '', 'csv files (*.csv)', 'csv file (*.csv)')\n\n if not name:\n return\n # Check the extension when saving\n if self.csvExt in name:\n mergedData.to_csv(name, header=False, index=True)\n else:\n message = 'Error saving file {}.'.format(name)\n self.messageBox(message)" ]
[ "0.6399908", "0.6323182", "0.61869615", "0.6044013", "0.6044013", "0.6044013", "0.59645635", "0.59101415", "0.58439785", "0.5820229", "0.58036727", "0.57980114", "0.5796495", "0.5790788", "0.57840085", "0.57607806", "0.57270277", "0.57178634", "0.570963", "0.570963", "0.5641698", "0.5637297", "0.56280494", "0.5623022", "0.5577594", "0.55647856", "0.55595964", "0.55562663", "0.5553746", "0.5539131", "0.5529425", "0.5517633", "0.55151975", "0.55137146", "0.5491308", "0.54810786", "0.547215", "0.547215", "0.5460846", "0.5460846", "0.5460846", "0.5436471", "0.54340816", "0.54191285", "0.541744", "0.54078764", "0.54058915", "0.5402148", "0.53996557", "0.53964", "0.53905416", "0.53892034", "0.53838253", "0.53797483", "0.5368994", "0.5364034", "0.5360471", "0.53522855", "0.5351695", "0.53492045", "0.53395855", "0.5336542", "0.53206795", "0.5319575", "0.5312305", "0.5312305", "0.5311189", "0.5306667", "0.53061885", "0.5306033", "0.5303555", "0.5292792", "0.5288321", "0.5287092", "0.5284326", "0.5282226", "0.52807826", "0.52762705", "0.5267869", "0.5265901", "0.5259637", "0.5251488", "0.524989", "0.5248832", "0.52403295", "0.5240121", "0.52327687", "0.5232159", "0.52303874", "0.5228213", "0.52279085", "0.52279085", "0.52265894", "0.521129", "0.5210448", "0.52072227", "0.5202649", "0.5201983", "0.5198104", "0.5197644" ]
0.582292
9
Registers all the JRPC overloaders in the jrpc server
def register_overloaders(jrpc_server: JRPCServer, receiver) -> None: jrpc_server.register_overloader( 'Application.GetProperties', lambda server: GetPropertiesOverloader(server, receiver)) jrpc_server.register_overloader( 'Application.SetMute', lambda server: SetMuteOverloader(receiver)) jrpc_server.register_overloader( 'Application.SetVolume', lambda server: SetVolumeOverloader(receiver)) jrpc_server.register_overloader( 'Application.Quit', lambda server: ApplicationQuitOverloader(receiver)) jrpc_server.register_overloader( 'System.Hibernate', lambda server: ApplicationQuitOverloader(receiver)) jrpc_server.register_overloader( 'System.Shutdown', lambda server: ApplicationQuitOverloader(receiver)) jrpc_server.register_overloader( 'System.Suspend', lambda server: ApplicationQuitOverloader(receiver)) jrpc_server.register_overloader( 'System.GetProperties', lambda server: SystemPropertiesOverloader())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def register_rpc_proxies(self):\n for rpc_name in self.rpc_proxy_list:\n logger.debug('Registering RPC to Proxy: {}'.format(rpc_name))\n\n class RPCProxy:\n\n def __init__(self, local_session, rpc_name):\n self._local_session = local_session\n self._rpc_name = rpc_name\n\n async def __call__(self, *args, **kwargs):\n logger.debug('Proxying RPC {}, with args {}, kwargs {}'.format(self._rpc_name, args, kwargs))\n return await self._local_session.call(self._rpc_name, *args, **kwargs)\n\n await self.remote_session.register(RPCProxy(self.local_session, rpc_name), rpc_name)", "def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))", "def _setup_rpc(self):\n pass", "def xmlrpc_methods():", "def register_peer(self):\n try:\n self.get_file_list()\n num_files = len(self.file_list)\n total_ops = self.key_end - self.key_start\n run_ops = total_ops/num_files\n print \"Staring Benchmark Register Peer with Server...\"\n t1 = time.time()\n for i in range(run_ops):\n for file in self.file_list:\n self.service.put(file, self.peer_id)\n t2 = time.time()\n total = run_ops * num_files\n print \"%s Register operations = %s sec\" % (total,t2-t1)\n print \"per Register operation = %s sec\" % ((t2-t1)/total)\n print \"per Register operation = %s msec\" % (((t2-t1)/total)*1000)\n except Exception as e:\n print \"Registering Peer Error, %s\" % e\n sys.exit(1)", "def registerRPC(self, call, args = None):\n\n rpc = RemoteProcedureCall(self, len(self.rpc), args)\n self.rpc.append(call)\n return rpc", "def register_extensions(app):\n grpc_channel = grpc.insecure_channel(\n f\"{app.config['GRPC_SERVICE']}:{app.config['GRPC_PORT']}\",\n )\n grpc_client = GrpcClient(grpc_channel)\n grpc_client.init_app(app)", "def make_json_handler(rpc):\n\n class JSONRPCHandler(BaseHTTPRequestHandler):\n \"\"\"\n A request handler for http.server that speaks JSON-RPC.\n \"\"\"\n def _validate_http_request(self):\n \"\"\"\n Ensures that we understand the HTTP portion of the request.\n \"\"\"\n if self.path != '/':\n print('Invalid request path:', self.path)\n self.send_error(HTTPStatus.NOT_FOUND, 'Request Must Have Path Of /')\n raise ValueError\n\n content_type = self.headers.get('Content-Type', None)\n if content_type != 'application/json':\n print('Invalid request Content-Type:', self.path)\n self.send_error(HTTPStatus.BAD_REQUEST, 'Content-Type Must Be application/json')\n raise ValueError\n\n def _validate_rpc_request(self, request):\n \"\"\"\n Ensures that we understand the JSON-RPC portion of the request.\n \"\"\"\n if request.get('jsonrpc', None) != '2.0':\n raise ValueError('Invalid jsonrpc: must be \"2.0\"')\n\n id = request.get('id', None)\n if not (id is None or isinstance(id, (str, int, float))):\n raise ValueError('Invalid id: must be null, string or number')\n\n method = request.get('method', None)\n if not isinstance(method, str):\n raise ValueError('Invalid method: must be string')\n\n params = request.get('params', [])\n if not isinstance(params, (dict, list)):\n raise ValueError('Invalid params: must be array or object')\n\n def _build_rpc_error(self, id, error, exception, keep_null_id=False):\n \"\"\"\n Returns an error response that can be encoded to JSON.\n\n By default this respects the ID of the request, and returns None if the\n ID is also None. 
To override this behavior, set keep_null_id=True.\n \"\"\"\n if id is None and not keep_null_id:\n return None\n\n message = RPC_ERROR_MESSAGES.get(error, str(exception))\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'error': {\n 'code': error.value,\n 'message': message,\n 'data': {\n 'stacktrace': str(exception) + '\\n' + '\\n'.join(traceback.format_tb(exception.__traceback__))\n }\n }\n }\n\n def _build_rpc_result(self, id, result):\n \"\"\"\n Returns a result response that can be encoded to JSON.\n \"\"\"\n if id is None:\n return None\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'result': result\n }\n\n def _process_request(self, request):\n \"\"\"\n Calls a single RPC function and returns the result.\n \"\"\"\n try:\n self._validate_rpc_request(request)\n except ValueError as err:\n return self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err, keep_null_id=True)\n\n id = request.get('id', None)\n\n try:\n method = getattr(rpc, request['method'])\n except AttributeError as err:\n return self._build_rpc_error(id, RpcErrors.METHOD_NOT_FOUND, err)\n\n try:\n params = request.get('params', None)\n if params is None:\n result = method()\n elif isinstance(params, list):\n result = method(*params)\n elif isinstance(params, dict):\n result = method(**params)\n\n return self._build_rpc_result(id, result)\n\n except TypeError as err:\n return self._build_rpc_error(id, RpcErrors.INVALID_PARAMS, err)\n except Exception as err:\n return self._build_rpc_error(id, RpcErrors.INTERNAL_ERROR, err)\n\n def _send_json(self, value):\n \"\"\"\n Dumps the value to a JSON string, and sets the appropriate headers to\n return it\n \"\"\"\n raw_value = json.dumps(value).encode('utf-8')\n\n self.send_response(200, 'OK')\n for header, value in CORS_HEADERS.items():\n self.send_header(header, value)\n\n self.send_header('Content-Type', 'application/json')\n self.send_header('Content-Length', str(len(raw_value)))\n self.end_headers()\n\n self.wfile.write(raw_value)\n\n def do_POST(self):\n \"\"\"\n Parses and processes a single or batch JSON-RPC request.\n \"\"\"\n try:\n self._validate_http_request()\n except ValueError:\n return\n\n content_length = int(self.headers.get('Content-Length', '0'))\n request_bytes = self.rfile.read(content_length)\n while len(request_bytes) < content_length:\n request_bytes += self.rfile.read(content_length - len(request_bytes))\n\n request_raw = request_bytes.decode('utf-8')\n try:\n request = json.loads(request_raw)\n except ValueError as err:\n error = self._build_rpc_error(None, RpcErrors.PARSE_ERROR, err, keep_null_id=True)\n self._send_json(error)\n return\n\n if isinstance(request, list):\n responses = [self._process_request(single) for single in request]\n response = [r for r in responses if r is not None]\n elif isinstance(request, dict):\n response = self._process_request(request)\n else:\n try:\n raise ValueError\n except ValueError as err:\n error = self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err)\n self._send_json(error)\n return\n\n if response is not None:\n self._send_json(response)\n else:\n self.send_response(200, 'OK')\n self.end_headers()\n\n def do_OPTIONS(self):\n \"\"\"\n Sends back the headers necessary to support CORS\n \"\"\"\n print('Processing CORS OPTIONS request')\n self.send_response(200, 'OK')\n for header, value in CORS_HEADERS.items():\n self.send_header(header, value)\n\n self.end_headers()\n\n return JSONRPCHandler", "def register_resources(self):\n raise NotImplementedError", "def register_options(options):\n return (\n options\n 
.register('jsonFilterFile',\n type_=str,\n default=None,\n description=\"Path to JSON file containing certified runs and luminosity blocks.\")\n .register('useHLTFilter',\n type_=bool,\n default=False,\n description=\"If True, only events triggered by one of the skimmed paths will be \"\n \"written out.\")\n .register('jetCollections',\n type_=str,\n default=[],\n multiplicity='list',\n description=\"The names of the jet collections to use (e.g. 'AK4PFCHS').\")\n .register('jecVersion',\n type_=str,\n default=None,\n description=\"Tag of JEC version to use for e.g. JEC uncertainties.\")\n .register('jecFromGlobalTag',\n type_=bool,\n default=False,\n description=\"If True, the JECs will be looked up in the conditions database \"\n \"(CondDB/Frontier) under the current global tag. If False, the \"\n \"text files for `jecVersion` will be used.\")\n .register('jerVersion',\n type_=str,\n default=None,\n description=\"Tag of JER version to use for e.g. jet smearing.\")\n .register('jerMethod',\n type_=str,\n default='stochastic',\n description=\"Method to use for JER smearing. One of: 'stochastic', 'hybrid'\")\n .register('jerGenMatchPtSigma',\n type_=float,\n default=3.0,\n description=\"Size of Gaussian core for 'hybrid' JER smearing.\")\n .register('jetIDSpec',\n type_=str,\n default=None,\n description=\"Version of Jet ID to use (e.g. '2016').\")\n .register('jetIDWorkingPoint',\n type_=str,\n default=None,\n description=\"Working point of Jet ID to use (e.g. 'TightLepVeto').\")\n .register('prefiringWeightFilePath',\n type_=str,\n default=\"\",\n description=\"Path to ROOT file containing prefiring weights.\")\n .register('prefiringWeightHistName',\n type_=str,\n default=\"\",\n description=\"Name of histogram inside prefiring weights file (e.g. 'L1prefiring_jetpt_2016BCD').\")\n .register('useObjectBasedJetID',\n type_=bool,\n default=False,\n description=\"If True, only jets passing the ID specified via 'jetIDSpec' and `jetIDWorkingPoint` will be considered valid.\")\n .register('checkForCompleteness',\n type_=bool,\n default=False,\n description=(\"(for testing) If True, will run some checks on the \"\n \"Ntuple output to ensure all branches are written out \"\n \"and no branch is omitted.\"))\n .register('stitchingWeight',\n type_=float,\n default=1.0,\n description=(\"(deprecated) The output branch 'stitchingWeight' \"\n \"will contain this value for each event. 
Can then be \"\n \"used when stitching together different samples.\"))\n .register('doJECUncertaintySources',\n type_=bool,\n default=False,\n description=\"Fill ntuple branch with JEC correction factors for individual JEC uncertainty sources.\")\n .register('doPrescales',\n type_=bool,\n default=False,\n description=\"Write out trigger prescales to Ntuple.\")\n .register('edmOut',\n type_=bool,\n default=False,\n description=\"(for testing only) Write out EDM file.\")\n )", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "def register_server(self, ppclassname, ppclass):\n\n global rpc_pp_class\n\n def accept(cmd, data, eof, sock, address):\n \"\"\" This is called by the rpc.py's tcp listener when a remote\n client connects \"\"\"\n\n ppclass = rpc_pp_class.get(cmd)\n if ppclass == None:\n warning('No PP class found: %s\\n' %(cmd))\n return RPC_CLOSE\n\n # Instantiate the given server class\n ppclass(address=address, sock=sock, data=data)\n return RPC_RELEASE\n\n if register_rpchandler(ppclassname, accept):\n rpc_pp_class[ppclassname] = ppclass", "def register_router(self, router):\n for prefix, viewset, basename in router.registry:\n self.register(prefix, viewset, base_name=basename)", "def register_rpc_backend(backend_name, init_rpc_backend_handler):\n rpc_backend_registry = _get_rpc_backend_registry()\n if backend_name in rpc_backend_registry:\n raise RuntimeError(\"Rpc backend {}: already registered\".format(backend_name))\n rpc_backend_registry[backend_name] = init_rpc_backend_handler", "def __init__(self):\n super(LoopbackTransport, self).__init__([_JSON_RPC_SERVER_PATH])", "def configure_rpc(cls, scheme=None):\r\n scheme = scheme or cls._meta.scheme\r\n\r\n if not scheme:\r\n return\r\n\r\n if isinstance(scheme, basestring):\r\n scheme = importlib.import_module(scheme)\r\n\r\n cls.scheme_name = scheme.__name__\r\n\r\n methods = getattr(scheme, '__all__', None) \\\r\n or [m for m in dir(scheme) if not m.startswith('_')]\r\n\r\n for mname in methods:\r\n method = getattr(scheme, mname)\r\n if hasattr(method, '__call__'):\r\n cls.methods[\"{0}.{1}\".format(\r\n cls.scheme_name, method.__name__)] = method", "def remotes():", "def registerServer(srv):\n srv.setListenAddress(hostname)\n srv.setMachine(getMBean('/Machines/'+machineName))", "def register(self):\n\n RPCObjectsRegistry.add(self)", "def server_plugin():", "async def test_multiple_rpc_transports(loop, server, redis_server_b, consume_rpcs):\n registry.add(ApiA())\n registry.add(ApiB())\n\n manually_set_plugins(plugins={})\n\n redis_server_a = server\n\n port_a = redis_server_a.tcp_address.port\n port_b = redis_server_b.tcp_address.port\n\n logging.warning(f'Server A port: {port_a}')\n logging.warning(f'Server B port: {port_b}')\n\n config = Config.load_dict({\n 'bus': {\n 'schema': {\n 'transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n }\n },\n 'apis': {\n 'default': {\n 'rpc_transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n 'result_transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n },\n 'api_b': {\n 'rpc_transport': {'redis': {'url': f'redis://localhost:{port_b}'}},\n 'result_transport': {'redis': {'url': f'redis://localhost:{port_b}'}},\n },\n }\n })\n\n bus = BusNode(name='', parent=None, bus_client=lightbus.BusClient(config=config, loop=loop))\n asyncio.ensure_future(consume_rpcs(bus))\n await asyncio.sleep(0.1)\n\n await bus.api_a.rpc_a.call_async()\n await bus.api_b.rpc_b.call_async()", "def register_specs(self, *args):\n 
self._corespecs_queue.extend(*args)", "def _initiate_registry_from_torchlib(\n self, torchlib_registry: registration.Registry\n ):\n for aten_name, aten_overloads_func in torchlib_registry.items():\n for func in aten_overloads_func.overloads:\n self.register(\n aten_name,\n self._opset_version,\n func,\n custom=False,\n )", "def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")", "def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1", "def extend(self, router):\n self.registry.extend(router.registry)", "def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()", "def ListWorkers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def server_plugin_list(ctx):\n data = ctx.obj.get_all_plugins()\n output_json_data(data)", "def api_all():\n update_pvserver_instances(pvserver_instances)\n return jsonify(pvserver_instances)", "def __init__(self, addr=(\"localhost\", 8000)):\n\n Thread.__init__(self)\n RPCBaseProxy.__init__(self)\n\n self.__instances = []\n\n self.__server = SimpleXMLRPCServer(addr=addr, logRequests=False, requestHandler=SimpleXMLRPCRequestHandler)\n self.__server.register_instance(self)", "def RegisterPlayers(self):\n self.tcf.RegisterPlayers(self.player_handler.GetPlayers())", "def _register_services(self) -> None:\n\n for isr in self.immediate_services_with_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n isr_instance = isr()\n for handler_type in isr.message_handler_types():\n # for each explicitly supported type, add it to the router\n self.immediate_msg_with_reply_router[handler_type] = isr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_with_reply_router[\n handler_type_subclass\n ] = isr_instance\n\n for iswr in self.immediate_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n iswr_instance = iswr()\n for handler_type in iswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.immediate_msg_without_reply_router[handler_type] = iswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_without_reply_router[\n handler_type_subclass\n ] = iswr_instance\n\n for eswr in self.eventual_services_without_reply:\n # Create a single instance 
of the service to cache in the router corresponding\n # to one or more message types.\n eswr_instance = eswr()\n for handler_type in eswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.eventual_msg_without_reply_router[handler_type] = eswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.eventual_msg_without_reply_router[\n handler_type_subclass\n ] = eswr_instance\n\n # Set the services_registered flag to true so that we know that all services\n # have been properly registered. This mostly exists because someone might\n # accidentally delete (forget to call) this method inside the __init__ function\n # of a sub-class of Node.\n self.services_registered = True", "def _register(self, comm, handler):", "def StartAllProtocols(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"startAllProtocols\", payload=payload, response_object=None)", "def register_plugins(self):\n if self.plugin_whitelist:\n plugin_list = self.plugin_whitelist\n else:\n plugin_list = plugins.__all__\n\n for plugin in plugin_list:\n if plugin in plugins.__all__:\n self.register_plugin(plugin,\n self.plugin_config.get(plugin, {}))\n else:\n raise NameError(\"Plugin %s not in plugins.__all__.\" % plugin)\n\n # Resolve plugin inter-dependencies.\n for plugin in self.plugin:\n self.plugin[plugin].post_init()", "def start_protocols(self, context: ResourceCommandContext) -> None:\n self.handler.start_protocols()", "def GetServers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def remote_registerEngine(self, engineReference):", "def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected", "def connect_multiplexer(multiplexer):", "def register_plugin_calls(*funcs):\n wrapped_dict = {}\n for func in funcs:\n wrapped_dict[func.__name__] = _handle_serialization(func)\n XenAPIPlugin.dispatch(wrapped_dict)", "def mergeRPC(remoteETM,remoteLoca): #Status: WIP\r\n pass", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def register_interface_magics(self):\n from sage.repl.interface_magic import InterfaceMagic\n InterfaceMagic.register_all(self.shell)", "def rpc(self) -> global___Rpc:", "def _register_serializers(self):\n import ray.util.serialization_addons\n from ray.util.serialization import StandaloneSerializationContext\n\n ctx = StandaloneSerializationContext()\n ray.util.serialization_addons.apply(ctx)", "def registerPatches(self,pl):\n self.set('patchmesh.patches',pl)", "def register_handlers(dp, di_container: 
di.Container):\n general.router.register_handlers(dp)\n\n di_container.wire(packages=[sys.modules[__name__]])", "def register_response_receivers(self, *response_receivers):\n for response_receiver in response_receivers:\n self.translator.message_receivers.append(response_receiver)", "def createSimpleXMLRPCServer(host, ports):\n\tfor port in ports:\n\t\ttry:\n\t\t\tres = xserver.SimpleXMLRPCServer((host,port))\n\t\texcept Exception as e:\n\t\t\tprint(\"Retrying:%s\"%(e))\n\t\telse:\n\t\t\tres.logRequests = 0 # hint from newsgroup to disable the - - [13/Apr/2007 13:33:57] \"POST /RPC2 HTTP/1.0\" 200 - output with every request\n\t\t\treturn res", "def rpc_call_to_all(client, call, prefix=DOCK_CONTAINER_NAME_PREFIX_BTC, arguments=None,\n rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n containers = get_containers_names(client, prefix)\n r = []\n for c in containers:\n rpc_server = get_ip_by_unknown(client, c)\n rpc_connection = AuthServiceProxy(\"http://%s:%s@%s:%s\" % (rpc_user, rpc_password, rpc_server, rpc_port))\n args = \"(\" + arguments + \")\" if arguments else \"()\"\n r.append(eval(\"rpc_connection.\" + call + args))\n return r\n except JSONRPCException as err:\n return False", "def _register_methods(self, registry, instance_type):\n to_register = list(instance_type.jit_methods) + \\\n list(instance_type.jit_static_methods)\n for meth in to_register:\n\n # There's no way to retrieve the particular method name\n # inside the implementation function, so we have to register a\n # specific closure for each different name\n if meth not in self.implemented_methods:\n self._implement_method(registry, meth)\n self.implemented_methods.add(meth)", "def install_serve_encoders_to_fastapi():\n # https://stackoverflow.com/questions/62311401/override-default-encoders-for-jsonable-encoder-in-fastapi # noqa\n pydantic.json.ENCODERS_BY_TYPE.update(serve_encoders)\n # FastAPI cache these encoders at import time, so we also needs to refresh it.\n fastapi.encoders.encoders_by_class_tuples = (\n fastapi.encoders.generate_encoders_by_class_tuples(\n pydantic.json.ENCODERS_BY_TYPE\n )\n )", "def _register_pycoin_networks() -> None:\n import os\n\n global _registered_pycoin\n if _registered_pycoin:\n return\n _registered_pycoin = True\n\n paths = os.environ.get('PYCOIN_NETWORK_PATHS', '').split()\n if 'hathor.pycoin' not in paths:\n paths.append('hathor.pycoin')\n os.environ['PYCOIN_NETWORK_PATHS'] = ' '.join(paths)", "def rpcmethod(func):\n func.rpcmethod = True\n return func", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)", "def registerWebsocketProtocol(connection, protocol): #@NoSelf", "def makeHandlers(self):\n\n yield self.loadGrids.start(funcSelf=self)\n yield self.updateClientWatchedGrids.start(funcSelf=self)\n logger.debug(\"RPCs started\")", "def __init__(self, rpc, encoder):\n super(EncoderModule, self).__init__(rpc, 'encoder', encoder)", "def register_resources(self, resources):\n for resource in resources:\n self.register_resource(resource)", "def rpc_method(func):\n func.rpc_callable = True\n return func", "def add_servers(self, servers: List[Server]):\n pass", "def 
add_ip_cores(self, scfg, ip_dir):\r\n\r\n return []", "def register_opts():\n _register_api_opts()\n _register_db_opts()", "def registerXMLRPC(self, unique_service_name, instance, endpoint):\n # TODO only set the ClientCert Handler if configured\n handler = XMLRPCHandler(unique_service_name)\n handler.connect(self._flaskapp.app, endpoint)\n handler.register_instance(instance)", "def register_blueprints(api):\n for module in MODULES:\n api.register_blueprint(module.blp)", "def beta_create_CoreRPC_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):\n request_deserializers = {\n ('pb.CoreRPC', 'AddNode'): AddNodeOptions.FromString,\n ('pb.CoreRPC', 'AddPod'): AddPodOptions.FromString,\n ('pb.CoreRPC', 'Backup'): BackupOptions.FromString,\n ('pb.CoreRPC', 'BuildImage'): BuildImageOptions.FromString,\n ('pb.CoreRPC', 'CreateContainer'): DeployOptions.FromString,\n ('pb.CoreRPC', 'GetContainer'): ContainerID.FromString,\n ('pb.CoreRPC', 'GetContainers'): ContainerIDs.FromString,\n ('pb.CoreRPC', 'GetNode'): GetNodeOptions.FromString,\n ('pb.CoreRPC', 'GetPod'): GetPodOptions.FromString,\n ('pb.CoreRPC', 'ListNetworks'): GetPodOptions.FromString,\n ('pb.CoreRPC', 'ListPodNodes'): ListNodesOptions.FromString,\n ('pb.CoreRPC', 'ListPods'): Empty.FromString,\n ('pb.CoreRPC', 'ReallocResource'): ReallocOptions.FromString,\n ('pb.CoreRPC', 'RemoveContainer'): ContainerIDs.FromString,\n ('pb.CoreRPC', 'RemoveImage'): RemoveImageOptions.FromString,\n ('pb.CoreRPC', 'RemoveNode'): RemoveNodeOptions.FromString,\n ('pb.CoreRPC', 'RemovePod'): RemovePodOptions.FromString,\n ('pb.CoreRPC', 'RunAndWait'): RunAndWaitOptions.FromString,\n ('pb.CoreRPC', 'SetNodeAvailable'): NodeAvailable.FromString,\n }\n response_serializers = {\n ('pb.CoreRPC', 'AddNode'): Node.SerializeToString,\n ('pb.CoreRPC', 'AddPod'): Pod.SerializeToString,\n ('pb.CoreRPC', 'Backup'): BackupMessage.SerializeToString,\n ('pb.CoreRPC', 'BuildImage'): BuildImageMessage.SerializeToString,\n ('pb.CoreRPC', 'CreateContainer'): CreateContainerMessage.SerializeToString,\n ('pb.CoreRPC', 'GetContainer'): Container.SerializeToString,\n ('pb.CoreRPC', 'GetContainers'): Containers.SerializeToString,\n ('pb.CoreRPC', 'GetNode'): Node.SerializeToString,\n ('pb.CoreRPC', 'GetPod'): Pod.SerializeToString,\n ('pb.CoreRPC', 'ListNetworks'): Networks.SerializeToString,\n ('pb.CoreRPC', 'ListPodNodes'): Nodes.SerializeToString,\n ('pb.CoreRPC', 'ListPods'): Pods.SerializeToString,\n ('pb.CoreRPC', 'ReallocResource'): ReallocResourceMessage.SerializeToString,\n ('pb.CoreRPC', 'RemoveContainer'): RemoveContainerMessage.SerializeToString,\n ('pb.CoreRPC', 'RemoveImage'): RemoveImageMessage.SerializeToString,\n ('pb.CoreRPC', 'RemoveNode'): Pod.SerializeToString,\n ('pb.CoreRPC', 'RemovePod'): Empty.SerializeToString,\n ('pb.CoreRPC', 'RunAndWait'): RunAndWaitMessage.SerializeToString,\n ('pb.CoreRPC', 'SetNodeAvailable'): Node.SerializeToString,\n }\n method_implementations = {\n ('pb.CoreRPC', 'AddNode'): face_utilities.unary_unary_inline(servicer.AddNode),\n ('pb.CoreRPC', 'AddPod'): face_utilities.unary_unary_inline(servicer.AddPod),\n ('pb.CoreRPC', 'Backup'): face_utilities.unary_unary_inline(servicer.Backup),\n ('pb.CoreRPC', 'BuildImage'): face_utilities.unary_stream_inline(servicer.BuildImage),\n ('pb.CoreRPC', 'CreateContainer'): face_utilities.unary_stream_inline(servicer.CreateContainer),\n ('pb.CoreRPC', 'GetContainer'): face_utilities.unary_unary_inline(servicer.GetContainer),\n ('pb.CoreRPC', 
'GetContainers'): face_utilities.unary_unary_inline(servicer.GetContainers),\n ('pb.CoreRPC', 'GetNode'): face_utilities.unary_unary_inline(servicer.GetNode),\n ('pb.CoreRPC', 'GetPod'): face_utilities.unary_unary_inline(servicer.GetPod),\n ('pb.CoreRPC', 'ListNetworks'): face_utilities.unary_unary_inline(servicer.ListNetworks),\n ('pb.CoreRPC', 'ListPodNodes'): face_utilities.unary_unary_inline(servicer.ListPodNodes),\n ('pb.CoreRPC', 'ListPods'): face_utilities.unary_unary_inline(servicer.ListPods),\n ('pb.CoreRPC', 'ReallocResource'): face_utilities.unary_stream_inline(servicer.ReallocResource),\n ('pb.CoreRPC', 'RemoveContainer'): face_utilities.unary_stream_inline(servicer.RemoveContainer),\n ('pb.CoreRPC', 'RemoveImage'): face_utilities.unary_stream_inline(servicer.RemoveImage),\n ('pb.CoreRPC', 'RemoveNode'): face_utilities.unary_unary_inline(servicer.RemoveNode),\n ('pb.CoreRPC', 'RemovePod'): face_utilities.unary_unary_inline(servicer.RemovePod),\n ('pb.CoreRPC', 'RunAndWait'): face_utilities.stream_stream_inline(servicer.RunAndWait),\n ('pb.CoreRPC', 'SetNodeAvailable'): face_utilities.unary_unary_inline(servicer.SetNodeAvailable),\n }\n server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)\n return beta_implementations.server(method_implementations, options=server_options)", "def _bind_method_serializers(self, meth_serializers):\n\n # We can't use update because that would be the wrong\n # precedence\n for mtype, serializer in meth_serializers.items():\n self.serializers.setdefault(mtype, serializer)", "def pedrpc_connect(self):\n # If the process monitor is alive, set it's options\n if self.procmon:\n while 1:\n if self.procmon.alive():\n break\n\n time.sleep(1)\n\n # connection established.\n for key, value in self.procmon_options.items():\n getattr(self.procmon, 'set_{0}'.format(key))(value)\n\n # If the network monitor is alive, set it's options\n if self.netmon:\n while 1:\n if self.netmon.alive():\n break\n\n time.sleep(1)\n\n # connection established.\n for key in self.netmon_options.keys():\n eval('self.netmon.set_%s(self.netmon_options[\"%s\"])' % (key, key))", "def rpc_match():", "def main(server):\n config_parser = ConfigParser()\n config_parser.read('resources/conf/config.ini')\n\n log_level = log_levels.get(config_parser.get('core', 'log.level'))\n log_filename = config_parser.get('core', 'log.path')\n logging.basicConfig(level=log_level,\n format='%(levelname)s:%(asctime)s:%(message)s',\n filename=log_filename)\n replicator = Replicator(config_parser)\n executor = ThreadPoolExecutor(max_workers=2)\n executor.submit(replicator.start)\n logger = logging.getLogger('replicator')\n if log_level == 'DEBUG':\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n if server:\n if config_parser.has_section('server'):\n check_server_configuration(config_parser, logger)\n server_config = {\n 'server.socket_host' : config_parser.get('server', 'socket_host'),\n 'server.socket_port' : config_parser.getint('server', 'socket_port'),\n 'log.access_file' : config_parser.get('server', 'log.access_file'),\n 'log.error_file' : config_parser.get('server', 'log.error_file')\n }\n cherrypy.config.update(server_config)\n logger.info('Server will be started at %s:%d' % (server_config['server.socket_host'],\n server_config['server.socket_port']))\n 
else:\n logger.info('No settings found, using the default one. (localhost:8080)')\n executor.submit(cherrypy.quickstart(ReplicatorServer(replicator)))", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def register_classes():\n CollectLimits.register_class()\n CollectLimits_SG.register_class()\n CollectStackedLimits_SG.register_class()", "def registerBlueprints(module_name):\n module = importlib.import_module(\n \"app.modules.\" + module_name, package=None)\n bp = getattr(module, module_name)\n server_logger.info(\"Registering module: \" + module_name)\n if bp.name == \"index\":\n server.register_blueprint(bp)\n else:\n server.register_blueprint(bp, url_prefix='/' + bp.name)", "def _setup_rpc(self):\n\n endpoints = RpcCallBacks(self)\n self.server = rpc.DfaRpcServer(self.ser_q, self._host,\n self.cfg.dfa_rpc.transport_url,\n endpoints,\n exchange=constants.DFA_EXCHANGE)", "def RegisterProtoRPCServiceToFlaskApp(app_inst, path, service_inst,\n service_name=None):\n service_name = service_name or service_inst.SERVICE_DESCRIPTOR.name\n endpoint_name = '__protorpc_service_view_func_' + str(uuid.uuid1())\n view_func = _ProtoRPCServiceFlaskAppViewFunc(service_inst)\n app_inst.add_url_rule('%s/%s.<method_name>' % (path, service_name),\n endpoint=endpoint_name, view_func=view_func,\n methods=['POST'])", "def _register_with_cluster(self, cpu_count):\n try:\n reply = cluster.send_command({\n \"op\": \"register\",\n \"address\": self.address,\n \"port_number\": 
port_number,\n \"cpu_count\": cpu_count\n }, wait_for_reply=True)\n\n if reply[\"op\"] == \"reply\":\n # print cluster information\n cluster_info = cluster.cluster_info\n print(\"Worker server at\", self.address, \"port number\", port_number)\n print(\"Joined cluster at\", cluster_info[0][\"address\"],\n \"port number\", cluster_info[0][\"port_number\"])\n return True\n\n else: return False\n\n except Exception as ex:\n print(\"Error:\", ex)\n return False", "def restart_all():\n\n restart_nginx()\n restart_supervisor()", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def register_server():\n (code, message) = rest_api.register_server(request)\n if (code == 200):\n return message\n else:\n abort(code)", "def register(name, func):\n WebSocketRouter.funcmap[name] = func", "def __init__(self) :\n self.remoteConnections = {}", "def serve_forever(self, poll_interval=0.5):\n logging.info('RPC server starting')\n self._idle_thread.start()\n SimpleJSONRPCServer.SimpleJSONRPCServer.serve_forever(self, poll_interval)", "def __init__(self, rpc):\n self.rpc = rpc", "def config_server_buffer_cb(data, option):\n\n for server in SERVERS.values():\n server.buffer_merge()\n return 1", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def registerExistingServer():\n cd('/')\n cd('/Servers/'+managedServername)\n registerServer(cmo)", "async def register_schemas(\n self, *, compatibility: str | None = None\n ) -> None:\n for subject_name, schema in self.schemas.items():\n await self._registry.register_schema(\n schema=schema,\n subject=subject_name,\n compatibility=compatibility,\n )", "def start_servers(self, **kwargs):\n self.cleanup()\n\n # Start up the API and default conductor server\n\n # We start the conductor server first, as the API server config\n # depends on the conductor port - this ordering allows for\n # retrying the launch on a port clash\n self.start_with_retry(self.conductor_server, 'conductor_port', 3,\n **kwargs)\n kwargs['conductor_port'] = self.conductor_server.bind_port\n\n self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)", "def interfaces(self):", "def interfaces(self):", "def register_blueprints(self):\n # Local import due to flask/blueprint circular imports.\n from mmapi.views import api_bp\n self.app.register_blueprint(api_bp, url_prefix='/api')", "def __init__(self, sc, conf):\n\n super(LBaaSv2RpcManager, self).__init__(sc, conf)", "def _extend_replays(self, num_per_problem: int):\n # fire off extension methods\n results = []\n for problem in tqdm.tqdm(self.problems, desc='spawn extend'):\n get_action = self._make_get_action(problem)\n extend_replay = rpyc.async(problem.problem_service.extend_replay)\n result = extend_replay(get_action, num_per_problem)\n # apparently I need to keep hold of async ref according to RPyC\n # docs (it's weak or s.th). Also, I need a background thread to\n # serve each environment's requests (...this may break things\n # slightly).\n bg_thread = rpyc.utils.helpers.BgServingThread(\n problem.problem_server.conn)\n results.append((extend_replay, result, bg_thread))\n\n # Now we wait for results to come back. 
This is horribly inefficient\n # when some environments are much harder than others; oh well.\n succ_rates = []\n for _, result, bg_thread in tqdm.tqdm(results, desc='wait extend'):\n succ_rates.append(to_local(result.value))\n # always shut down cleanly\n bg_thread.stop()\n\n return succ_rates", "def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()", "def register_service(service, iface, name):", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def intercept_server(server, *interceptors):\n from grpc_opentracing.grpcext import _interceptor\n return _interceptor.intercept_server(server, *interceptors)" ]
[ "0.5887183", "0.53439647", "0.5303127", "0.5229928", "0.5181709", "0.51379925", "0.5042705", "0.5018712", "0.50179935", "0.5007511", "0.50063205", "0.50051254", "0.49503332", "0.4928616", "0.4922325", "0.4869519", "0.4867418", "0.48619875", "0.4855552", "0.4855092", "0.48424605", "0.48338652", "0.48154017", "0.4812718", "0.47886303", "0.4785449", "0.47462106", "0.47255152", "0.47188923", "0.47185788", "0.4712613", "0.47057647", "0.4701785", "0.46949998", "0.46770906", "0.4675679", "0.46691817", "0.46608844", "0.46564987", "0.46444878", "0.4640018", "0.46365026", "0.46263722", "0.46160498", "0.4607915", "0.4601826", "0.45940533", "0.45865545", "0.458546", "0.45689985", "0.4566039", "0.4563341", "0.4554698", "0.45435888", "0.45314893", "0.45247945", "0.4516193", "0.45137843", "0.45019463", "0.4500071", "0.4488176", "0.44809964", "0.44768414", "0.4461565", "0.44516194", "0.44457823", "0.4443764", "0.4438238", "0.44329393", "0.4431043", "0.4418315", "0.44037017", "0.44012615", "0.44007838", "0.43958086", "0.4384573", "0.4382948", "0.43785262", "0.43757206", "0.4363904", "0.4361535", "0.43577668", "0.4354044", "0.43513423", "0.4348961", "0.43475127", "0.43447924", "0.43447924", "0.43445587", "0.4342884", "0.43405434", "0.43392128", "0.43392128", "0.43377495", "0.43376923", "0.43319023", "0.4330288", "0.43201506", "0.43200243", "0.43152118" ]
0.78924644
0