query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Print table from list of messages.
def print_table(headers, rows): try: if headers: print('\n') print(tabulate.tabulate( rows, headers=headers, tablefmt="plain", numalign="left" )) print('\n') except Exception as e: print(e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")", "def ...
[ "0.69499284", "0.66310364", "0.65664935", "0.64629143", "0.64629143", "0.6451159", "0.6444036", "0.6387004", "0.63242275", "0.63035125", "0.6296198", "0.629306", "0.62866634", "0.6281273", "0.62464696", "0.6153312", "0.6153312", "0.61344033", "0.6124151", "0.60984", "0.608824...
0.6430923
7
Manages Docker containers' execution on batch systems.
def bdocker(ctx, host): ctx.obj = commands.CommandController(endpoint=host)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def docker_worker():", "def main():\n client = docker.from_env()\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--package_id', default='0',\n help='provide id for the work package, comma separated if multiple')\n parser.add_argument('--load_quicksave'...
[ "0.7041346", "0.64235425", "0.6416735", "0.63321793", "0.6304628", "0.62385863", "0.61790556", "0.6143068", "0.61176145", "0.60870963", "0.6064856", "0.6035837", "0.6028406", "0.60254467", "0.59756327", "0.59450024", "0.5940408", "0.59361523", "0.5926757", "0.5865155", "0.583...
0.5935382
18
Configure credentials and batch environment. It creates a token credential for the user, and configure the batch environment to run dockers and control de accounting. Command executed by the root in prolog
def configure_environment(ctx, user): try: out = ctx.obj.configuration( user ) print_message(out["path"]) except BaseException as e: print_error(e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configuration(self, data):\n required = {'admin_token', 'user_credentials'}\n api.validate(data, required)\n admin_token = data['admin_token']\n session_data = data['user_credentials']\n user_token = self.credentials_module.authenticate(\n admin_token, session_data...
[ "0.6231196", "0.6169389", "0.584659", "0.578861", "0.57143813", "0.56795084", "0.557102", "0.55013365", "0.54908735", "0.54244846", "0.53997415", "0.53970516", "0.5373382", "0.53675306", "0.53632724", "0.53570175", "0.5325696", "0.5263796", "0.52633834", "0.5260048", "0.52404...
0.5082915
34
Clean credentials and batch environment. It cleans a token credential for the user, and the batch environment, in addition to delete all dockers. Also, Command executed by the root in prolog
def clean_environment(ctx, token): try: out = ctx.obj.clean_environment(token) print_message(out) except BaseException as e: print_error(e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self, data):\n required = {'admin_token', 'token'}\n api.validate(data, required)\n admin_token = data['admin_token']\n force = True\n self.credentials_module.authorize_admin(admin_token)\n token = data['token']\n containers = self.credentials_module.list_...
[ "0.74475706", "0.61287606", "0.6078283", "0.6048094", "0.59989303", "0.5986939", "0.59787303", "0.59101623", "0.590741", "0.5895125", "0.58949953", "0.58100575", "0.5757535", "0.5750923", "0.5743235", "0.57311714", "0.5729552", "0.57249904", "0.57111883", "0.5707326", "0.5699...
0.6538844
1
Pull image from repository
def container_pull(ctx, token, source): try: out = ctx.obj.container_pull(token, source) print_message(out) except BaseException as e: print_error(e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pull(self, data):\n required = {'token', 'source'}\n api.validate(data, required)\n token = data['token']\n repo = data['source']\n self.credentials_module.authorize(token)\n result = self.docker_module.pull_image(repo)\n # credentials_module.add_image(token, re...
[ "0.79796773", "0.77487904", "0.75974673", "0.73926455", "0.7349597", "0.72877115", "0.7240513", "0.700893", "0.69396365", "0.6846112", "0.6773135", "0.6690871", "0.6616364", "0.6582388", "0.6545659", "0.6466452", "0.64300317", "0.64291394", "0.64272416", "0.64215964", "0.6408...
0.5703203
63
List all the containers running
def container_list(ctx, token, all): try: out = ctx.obj.container_list(token, all) headers = ['CONTAINER ID', 'IMAGE', 'COMMAND', 'CREATED', 'STATUS', 'PORTS', 'NAMES'] print_table(headers, out) except BaseException as e: print_error(e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def containers():\n # TODO: can there be multiple names?\n cmd = [ 'docker', 'ps', '--format', '{{.Names}}' ]\n with popen_text(cmd) as docker:\n for ln in docker.stdout:\n yield ln[:-1]", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def list(s...
[ "0.79300845", "0.76792014", "0.76520234", "0.7499341", "0.74776196", "0.7367797", "0.72929794", "0.71087295", "0.7014272", "0.6956588", "0.6936781", "0.6914076", "0.6901331", "0.69012845", "0.67714363", "0.6749582", "0.6717529", "0.6671837", "0.66680163", "0.6631425", "0.6581...
0.7676674
2
Show the log of a container
def container_logs(ctx, token, container_id): try: out = ctx.obj.container_logs(token, container_id) print_message(out) except BaseException: m = ("Error: No container related to %s" % container_id) print_error(m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logs(self, container: Container) -> str:", "def logs(name):\n\n try:\n container = CLIENT.containers.get(name)\n click.secho(str(container.logs()), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def stream_container_logs(container: Container) -> No...
[ "0.8408397", "0.7773719", "0.73624533", "0.7354112", "0.71977407", "0.71710056", "0.7030652", "0.6592035", "0.6587888", "0.65538996", "0.63391703", "0.62804216", "0.62721485", "0.6259622", "0.62353", "0.61764807", "0.61363643", "0.613622", "0.61057305", "0.60767347", "0.60549...
0.7430669
2
Return the low level information
def container_inspect(ctx, token, container_id): try: out = ctx.obj.container_inspect(token, container_id) print_message(out) except BaseException: m = ("Error: No such container: %s" % container_id) print_error(m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getInfo():", "def info() -> None:", "def get_info(self):\n pass", "def get_info(self):\n pass", "def info(self) -> dict:", "def _get_information(self):\n pass", "def info(self):", "def info(self):", "def get_info(self):\n return \"TODO !\"", "def get_info(self):\n ...
[ "0.81291723", "0.7838908", "0.76136756", "0.76136756", "0.7582691", "0.7491799", "0.7458383", "0.7458383", "0.7445192", "0.7323257", "0.7281161", "0.7077621", "0.70342135", "0.69988096", "0.6942593", "0.6920749", "0.68934894", "0.68923295", "0.68902117", "0.67708105", "0.6769...
0.0
-1
Delete a container or list of them.
def container_delete(ctx, token, container_ids, force): try: out = ctx.obj.container_delete(token, container_ids, force) print_message(out) except exceptions.DockerException as e: m = e.message print_error(m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_container(self, container: Container):", "def DeleteContainers(self):\n for container in itertools.chain(*list(self.containers.values())):\n container.Delete()", "def delete_container(ContainerName=None):\n pass", "def delete_container(self, account, container):\n \n pass"...
[ "0.80328494", "0.7434844", "0.7386382", "0.7215402", "0.6816788", "0.66818416", "0.66644293", "0.65226024", "0.6510037", "0.65017575", "0.64464664", "0.62738705", "0.62296575", "0.6196632", "0.6169314", "0.60266644", "0.6006505", "0.59684145", "0.5931678", "0.59242857", "0.59...
0.7737605
1
Copy files/folders Copy files/folders between a container and the local filesystem.
def copy(ctx, token, path): try: container_id = path["container_id"] container_path = path["container_path"] host_path = path["host_path"] host_to_container = path["host_to_container"] out = ctx.obj.copy_to_from_container(token, container_id, container_path, host_path, host_to_container) print_message(out) except BaseException as e: print_error(e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_container_dirs(project_name, container_dir_to_copy):\n copy_containers_dirs_func(project_name, container_dir_to_copy)", "def test_6c_copy_data_btw_containers(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login...
[ "0.68293494", "0.6804402", "0.672718", "0.66085654", "0.65086794", "0.65060276", "0.64967877", "0.64833045", "0.64071506", "0.6400087", "0.6289071", "0.6253664", "0.6189857", "0.6166261", "0.61502683", "0.6096343", "0.6089863", "0.6087521", "0.608357", "0.60329944", "0.602137...
0.6554481
4
Movement simulation for n+1 agents Agent can either be human or robot. humans are controlled by a unknown and fixed policy. robot is controlled by a known and learnable policy.
def __init__(self): self.time_limit = None self.time_step = None self.robot = None self.humans = None self.global_time = None self.robot_sensor_range = None # reward function self.success_reward = None self.collision_penalty = None self.discomfort_dist = None self.discomfort_penalty_factor = None # simulation configuration self.config = None self.case_capacity = None self.case_size = None self.case_counter = None self.randomize_attributes = None self.train_val_scenario = None self.test_scenario = None self.current_scenario = None self.square_width = None self.circle_radius = None self.human_num = None self.group_num = None self.group_size = None self.nonstop_human = None self.centralized_planning = None self.centralized_planner = None # for visualization self.states = None self.action_values = None self.attention_weights = None self.robot_actions = None self.rewards = None self.As = None self.Xs = None self.feats = None self.trajs = list() self.save_scene_dir = None self.panel_width = 10 self.panel_height = 10 self.panel_scale = 1 self.test_scene_seeds = [] self.dynamic_human_num = [] self.human_starts = [] self.human_goals = [] #for debug self.add_human = [] self.delete_human = [] self.total_group_size = 0 self.hp_25 = {} self.ha_25 = {} self.phase = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulation(nepisodes):\n # Initialize robots\n # print('I am inside the simulation')\n agents = [] # List containing all robots\n a1 = Agent(start = [0, 0], end = [grid_size-1, grid_size-1], nr = 1) # Create agent 1\n a2 = Agent(start = [0, grid_size-1], end = [grid_size-1, 0], nr = 2) # Create ...
[ "0.7247015", "0.662848", "0.6565805", "0.6498094", "0.63772035", "0.6343703", "0.6315927", "0.6275957", "0.6245939", "0.6235411", "0.622889", "0.61916804", "0.6190207", "0.61158586", "0.6092612", "0.60810214", "0.6073287", "0.60604274", "0.60397756", "0.59972614", "0.5987725"...
0.0
-1
Set px, py, gx, gy, vx, vy, theta for robot and humans
def reset(self, phase='test', test_case=None): assert phase in ['train', 'val', 'test'] self.phase = phase if self.robot is None: raise AttributeError('Robot has to be set!') if test_case is not None: self.case_counter[phase] = test_case self.global_time = 0 base_seed = {'train': self.case_capacity['val'] + self.case_capacity['test'], 'val': 0, 'test': self.case_capacity['val']} self.robot.set(0, -self.circle_radius, 0, self.circle_radius, 0, 0, np.pi / 2) if self.case_counter[phase] >= 0: np.random.seed(base_seed[phase] + self.case_counter[phase]) random.seed(base_seed[phase] + self.case_counter[phase]) if phase == 'test': logging.debug('current test seed is:{}'.format(base_seed[phase] + self.case_counter[phase])) if not self.robot.policy.multiagent_training and phase in ['train', 'val']: # only CADRL trains in circle crossing simulation human_num = 1 self.current_scenario = 'circle_crossing' else: self.current_scenario = self.test_scenario human_num = self.human_num self.humans = [] for _ in range(human_num): self.humans.append(self.generate_human()) # case_counter is always between 0 and case_size[phase] self.case_counter[phase] = (self.case_counter[phase] + 1) % self.case_size[phase] else: assert phase == 'test' if self.case_counter[phase] == -1: # for debugging purposes self.human_num = 3 self.humans = [Human(self.config, 'humans') for _ in range(self.human_num)] self.humans[0].set(0, -6, 0, 5, 0, 0, np.pi / 2) self.humans[1].set(-5, -5, -5, 5, 0, 0, np.pi / 2) self.humans[2].set(5, -5, 5, 5, 0, 0, np.pi / 2) else: raise NotImplementedError #sha: #potential issue of set agent'time_step and agent.policy.time_step here #is that when agent is not add at the method self.reset(), then... 
for agent in [self.robot] + self.humans: agent.time_step = self.time_step agent.policy.time_step = self.time_step if self.centralized_planning: self.centralized_planner.time_step = self.time_step self.states = list() self.robot_actions = list() self.rewards = list() if hasattr(self.robot.policy, 'action_values'): self.action_values = list() if hasattr(self.robot.policy, 'get_attention_weights'): self.attention_weights = list() if hasattr(self.robot.policy, 'get_matrix_A'): self.As = list() if hasattr(self.robot.policy, 'get_feat'): self.feats = list() if hasattr(self.robot.policy, 'get_X'): self.Xs = list() if hasattr(self.robot.policy, 'trajs'): self.trajs = list() # get current observation if self.robot.sensor == 'coordinates': ob = self.compute_observation_for(self.robot) elif self.robot.sensor == 'RGB': raise NotImplementedError return ob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def set_robot(self, x, y):\n state = ModelState()\n state.model_name = 'turtlebot3_waffle_pi'\n ...
[ "0.66958505", "0.6607154", "0.6463861", "0.64320457", "0.6316836", "0.63069874", "0.6251611", "0.61822575", "0.6169917", "0.6128124", "0.604274", "0.6007004", "0.6003148", "0.5984499", "0.5962923", "0.59606934", "0.5928108", "0.59259844", "0.5916356", "0.5914894", "0.5906806"...
0.0
-1
Compute actions for all agents, detect collision, update environment and return (ob, reward, done, info)
def step(self, action, update=True): if self.centralized_planning: agent_states = [human.get_full_state() for human in self.humans] if self.robot.visible: agent_states.append(self.robot.get_full_state()) human_actions = self.centralized_planner.predict(agent_states)[:-1] else: human_actions = self.centralized_planner.predict(agent_states) else: human_actions = [] for human in self.humans: ob = self.compute_observation_for(human) human_actions.append(human.act(ob)) # collision detection dmin = float('inf') collision = False for i, human in enumerate(self.humans): px = human.px - self.robot.px py = human.py - self.robot.py if self.robot.kinematics == 'holonomic': vx = human.vx - action.vx vy = human.vy - action.vy else: vx = human.vx - action.v * np.cos(action.r + self.robot.theta) vy = human.vy - action.v * np.sin(action.r + self.robot.theta) ex = px + vx * self.time_step ey = py + vy * self.time_step # closest distance between boundaries of two agents closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius if closest_dist < 0: collision = True logging.debug("Collision: distance between robot and p{} is {:.2E} at time {:.2E}".format(human.id, closest_dist, self.global_time)) break elif closest_dist < dmin: dmin = closest_dist # collision detection between humans human_num = len(self.humans) for i in range(human_num): for j in range(i + 1, human_num): dx = self.humans[i].px - self.humans[j].px dy = self.humans[i].py - self.humans[j].py dist = (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius if dist < 0: # detect collision but don't take humans' collision into account logging.debug('Collision happens between humans in step()') # check if reaching the goal end_position = np.array(self.robot.compute_position(action, self.time_step)) reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius if self.global_time >= self.time_limit - 1: reward = 0 done = True info = 
Timeout() elif collision: reward = self.collision_penalty done = True info = Collision() elif reaching_goal: reward = self.success_reward done = True info = ReachGoal() elif dmin < self.discomfort_dist: # adjust the reward based on FPS reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step done = False info = Discomfort(dmin) else: reward = 0 done = False info = Nothing() if update: # store state, action value and attention weights if hasattr(self.robot.policy, 'action_values'): self.action_values.append(self.robot.policy.action_values) if hasattr(self.robot.policy, 'get_attention_weights'): self.attention_weights.append(self.robot.policy.get_attention_weights()) if hasattr(self.robot.policy, 'get_matrix_A'): self.As.append(self.robot.policy.get_matrix_A()) if hasattr(self.robot.policy, 'get_feat'): self.feats.append(self.robot.policy.get_feat()) if hasattr(self.robot.policy, 'get_X'): self.Xs.append(self.robot.policy.get_X()) if hasattr(self.robot.policy, 'traj'): self.trajs.append(self.robot.policy.get_traj()) # update all agents self.robot.step(action) for human, action in zip(self.humans, human_actions): human.step(action) if self.nonstop_human and human.reached_destination(): self.generate_human(human) self.global_time += self.time_step self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans], [human.id for human in self.humans]]) self.robot_actions.append(action) self.rewards.append(reward) # compute the observation if self.robot.sensor == 'coordinates': ob = self.compute_observation_for(self.robot) elif self.robot.sensor == 'RGB': raise NotImplementedError else: if self.robot.sensor == 'coordinates': ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)] elif self.robot.sensor == 'RGB': raise NotImplementedError return ob, reward, done, info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self, actions): # actions is a list,\n\n assert len(actions) == len(self.agents), \"Number of actions (\" + str(\n len(actions)) + \") does not match number of agents (\" + str(self.n_agents) + \")\"\n\n # Process movement based on real states (not belief)\n\n\n rewards = ...
[ "0.69635177", "0.6676281", "0.6554985", "0.64697844", "0.63891464", "0.6335718", "0.6276497", "0.627058", "0.6184203", "0.61650044", "0.6161578", "0.6150834", "0.61403406", "0.61149937", "0.6105217", "0.6084121", "0.6073783", "0.60636634", "0.60587096", "0.6010782", "0.601078...
0.64085144
4
Compute the softmax of each element along an axis of X.
def softmax(X, theta = 1.0, axis = None): # make X at least 2d y = np.atleast_2d(X) # find axis if axis is None: axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1) # multiply y against the theta parameter, y = y * float(theta) # subtract the max for numerical stability y = y - np.expand_dims(np.max(y, axis = axis), axis) # exponentiate y y = np.exp(y) # take the sum along the specified axis ax_sum = np.expand_dims(np.sum(y, axis = axis), axis) # finally: divide elementwise p = y / ax_sum # flatten if X was 1D if len(X.shape) == 1: p = p.flatten() return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def softmax(self, x):\n\n out = np.zeros(x.shape)\n for i in range(x.shape[0]):\n max_x = x[i] - np.max(x[i])\n out[i] = np.exp(max_x) / np.sum(np.exp(max_x), axis=0)\n\n return out", "def softmax(x: npt.NDArray) -> npt.NDArray:\n row_wise_max = np.max(x, axis=1).res...
[ "0.86482805", "0.8523447", "0.8501702", "0.8450376", "0.8424998", "0.8401389", "0.83826745", "0.8379566", "0.83725315", "0.8363791", "0.83552516", "0.83450276", "0.8326659", "0.8309501", "0.8274441", "0.82731706", "0.8266031", "0.8248805", "0.8248805", "0.8244989", "0.824277"...
0.0
-1
floor the point to the next lower multiple of bucket_size
def bucketize(point, bucket_size): return bucket_size * math.floor(point / bucket_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('buc...
[ "0.7870765", "0.7870765", "0.6348058", "0.61719537", "0.61719537", "0.5968876", "0.59520507", "0.5859526", "0.5772838", "0.57719916", "0.5732552", "0.57283777", "0.5643887", "0.5643887", "0.5638288", "0.56255656", "0.5601112", "0.55790997", "0.55427814", "0.5534332", "0.55317...
0.79474443
0
buckets the points and counts how many in each bucket
def make_histogram(points, bucket_size): return Counter(bucketize(point, bucket_size) for point in points)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point /...
[ "0.7339186", "0.7339186", "0.6404966", "0.63198906", "0.63145477", "0.6261517", "0.6261517", "0.6104376", "0.6046917", "0.5988437", "0.59414226", "0.58722836", "0.58446556", "0.5792215", "0.57895434", "0.57877", "0.56928134", "0.5675051", "0.56433356", "0.5634637", "0.5604564...
0.7430182
0
find approximate inverse using binary search
def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001): # if not standard, compute standard and rescale if mu != 0 or sigma != 1: return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance) low_z, low_p = -10.0, 0 # normal_cdf(-10) is (very close to) 0 hi_z, hi_p = 10.0, 1 # normal_cdf(10) is (very close to) 1 while hi_z - low_z > tolerance: mid_z = (low_z + hi_z) / 2 # consider the midpoint mid_p = normal_cdf(mid_z) # and the cdf's value there if mid_p < p: # midpoint is still too low, search above it low_z, low_p = mid_z, mid_p elif mid_p > p: # midpoint is still too high, search below it hi_z, hi_p = mid_z, mid_p else: break return mid_z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binarysearch(a, i):\n l = 0\n r = len(a) - 1\n \n while l < r:\n m = l + (r-l)/2\n \n if a[m] == i:\n return m\n elif a[m] < i:\n l = m + 1\n else:\n ...
[ "0.62011486", "0.6161687", "0.6134568", "0.61064506", "0.60823286", "0.60589427", "0.60270196", "0.60241973", "0.59456474", "0.58296686", "0.58176553", "0.5816624", "0.5789889", "0.578986", "0.57876474", "0.5786067", "0.57808775", "0.5775977", "0.5749061", "0.57435906", "0.57...
0.0
-1
returns a random draw from a standard normal distribution
def random_normal(): return inverse_normal_cdf(random.random())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_normal():\n return inverse_normal_cdf(random.random())", "def normal(mean, std):\n\n return random.gauss(mean, std)", "def get_standard_normal_distribution():\n return np.random.normal(0, 1)", "def draw_normal(self):\n means, scale = self.get_means_and_scales()\n ret...
[ "0.80372727", "0.8026352", "0.7671266", "0.758368", "0.72377944", "0.6984565", "0.67650646", "0.6753574", "0.67039895", "0.6645325", "0.66249055", "0.65704095", "0.6569231", "0.6554965", "0.65050215", "0.6493456", "0.6487899", "0.64656204", "0.64537066", "0.64235955", "0.6408...
0.80891997
0
Function to execute the graph export pipeline
def run(self, example_input : Union[str,Path,None] = None) -> EasyDict : outputs = [] ok = True for export_config in self.export_configs : exporter = create_exporter( config=export_config, experiment_name=self.experiment_name, image_size=self.image_size, output_directory=(self.experiment_directory), ) ok = exporter( predictor=self.predictor, class_names=self.class_names, example_image_path=example_input ) and ok outputs.append(str(exporter.filename)) print('model is exported to:', ', '.join(outputs)) # TODO specify which export is failed result = EasyDict({'export_status' : ok}) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call(argv):\n known_args, beam_options = parse_args(argv)\n\n yaml_string = known_args.dag.decode('string_escape')\n dag = yaml.load(yaml_string)\n\n pipeline_options = PipelineOptions(beam_options)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n\n p = beam.Pipeline(options=pipeline_o...
[ "0.6055563", "0.5933445", "0.5930202", "0.592405", "0.58405817", "0.5824905", "0.57319194", "0.5693886", "0.56896466", "0.56853735", "0.5645192", "0.56450754", "0.56329167", "0.56210643", "0.5618542", "0.5617108", "0.5615359", "0.55950373", "0.5586101", "0.55649096", "0.55558...
0.5214862
50
Transform request data to dict with 2 level of depth
def request_data_to_dict(data): if not isinstance(data, ImmutableMultiDict): raise ValueError('Input must be ImmutableMultiDict type.') res = {} for (key, value) in data.to_dict().items(): matches = re.match('(.*)\[(.*)\]', key) if matches: (key_lv_1, key_lv_2) = matches.groups() if key_lv_1 not in res: res[key_lv_1] = {} res[key_lv_1][key_lv_2] = value else: res[key] = value return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _to_request_dict(self):\n return {\"attr1\": self.attr1, \"attr2\": \"test\"}", "def to_dict(self, request) -> Dict[str, Any]:\n adict = self.__dict__.copy()\n adict[\"url\"] = self.href(adict[\"url\"], request)\n adict[\"img\"] = self.href(adict[\"img\"], request)\n if...
[ "0.6378962", "0.63289046", "0.6227822", "0.60884583", "0.6052619", "0.60420406", "0.60407573", "0.6025036", "0.59494644", "0.5901705", "0.5876816", "0.5818334", "0.5817537", "0.57859", "0.5757327", "0.5757108", "0.5747224", "0.5745498", "0.5735476", "0.5723966", "0.57147676",...
0.7102085
0
Sweeping from low to high brightness and back, continuously.
def Demo(controller_name, outputs): print 'Initiating controller %r ...\n' % controller_name box = getattr(controller, controller_name).FirstDevice(outputs=outputs) print '\nFade to white and back.' FadeOutputs(box, '#fff') FadeOutputs(box, '#000') print 'Fade to a random color and back to black, ad nauseum.' while True: FadeOutputs(box, utils.RandomColor()) FadeOutputs(box, '#000')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def twinkle(self, wait: float = DEFAULT_SPEED, count: int = 10):\n last = None\n for _ in range(count):\n c = randint(0, len(COLOR_TUPLE_LIST) - 1) # Choose random color index\n j = randint(0, self.np.n - 1) # Choose random pixel\n\n # Check that the pixel is off (d...
[ "0.6644794", "0.6367496", "0.63170844", "0.6283884", "0.6260947", "0.62575155", "0.6218248", "0.6202204", "0.6158688", "0.61483514", "0.6132683", "0.61230457", "0.6123004", "0.6075169", "0.6070032", "0.6054008", "0.603593", "0.60226864", "0.6017028", "0.60140914", "0.6007782"...
0.0
-1
Fades all outputs to the given color and waits for it to complete.
def FadeOutputs(box, color, steps=50): for output in box: output.Fade(color=color, steps=steps) time.sleep(steps / (float(box.frequency) / len(box)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_fade_colour(l, leds, r, g, b, duration):\n l._do_multi_led_command(\n create_fade_colour_command, leds, r, g, b, duration\n )", "def color_chase(self, color: tuple = CYAN, wait: float = DEFAULT_SPEED):\n for i in range(self.np.n):\n self.np[i] = color\n ...
[ "0.6856355", "0.66404843", "0.64948034", "0.6415791", "0.6347536", "0.62956667", "0.6144092", "0.6049132", "0.5983142", "0.59646887", "0.5947122", "0.5939072", "0.59066415", "0.58729315", "0.57774615", "0.5768261", "0.5764425", "0.5718582", "0.56916755", "0.56897503", "0.5660...
0.8030329
0
Processes commandline input to setup the demo.
def main(): import optparse import sys parser = optparse.OptionParser() parser.add_option('-c', '--controller', default='NewController', help='Controller class to instantiate.') parser.add_option('-o', '--outputs', type='int', default=5, help='Number of outputs to use on the hardware.') options, _arguments = parser.parse_args() try: Demo(options.controller, options.outputs) except controller.ConnectionError: sys.exit('ABORT: Could not find a suitable device.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup():\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--rows\", type=int, default=10, help=\"Number of rows to generate in demo file\")\n parser.add_argument(\"--filename\", type=str, default=\"..//data//demo.csv\", help=\"Filename of demo data\")\n parser....
[ "0.67759037", "0.6695762", "0.66869605", "0.6590389", "0.643101", "0.6393426", "0.63334584", "0.63311416", "0.62942785", "0.6283842", "0.6267345", "0.6248693", "0.6248693", "0.6229788", "0.6229788", "0.62010115", "0.6191372", "0.61896974", "0.6173674", "0.6158222", "0.6149269...
0.0
-1
A method for scrolling the page.
def scroll_down(driver):
    """Repeatedly scroll to the bottom of the page until its height stops growing."""
    def page_height():
        return driver.execute_script(
            "return document.body.scrollHeight")

    previous_height = page_height()
    while True:
        driver.execute_script(
            "window.scrollTo(0, document.body.scrollHeight);")
        # Give the page time to load the newly revealed content.
        time.sleep(2)
        current_height = page_height()
        if current_height == previous_height:
            break
        previous_height = current_height
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scroll(*args):", "def scroll_to(self):\n\n if self:\n pass", "def scroll_page(self):\n scroll_down = self.driver.find_element_by_tag_name(\"html\")\n scroll_down.send_keys(Keys.END)\n sleep(TestData.DELAY)\n scroll_down.send_keys(Keys.CONTROL + Keys.HOME)\n ...
[ "0.7725844", "0.71696216", "0.71549976", "0.7153446", "0.71490943", "0.7085735", "0.70825195", "0.6995313", "0.6939542", "0.69198406", "0.6776687", "0.6771578", "0.67377883", "0.6731459", "0.67016435", "0.6660212", "0.65802693", "0.64700073", "0.6467011", "0.6460393", "0.6429...
0.0
-1
This function returns the predicted integer. The input is the pixel values from the imageprepare() function.
def predictint(test_images):
    """Return the predicted digit for each image in *test_images*.

    Rebuilds the CNN inference graph, restores the trained weights from
    disk and runs argmax over the softmax output for every image.

    NOTE(review): assumes *test_images* is a 2-D array of flattened pixel
    rows (one image per row, image_size values each) as produced by
    imageprepare() — TODO confirm with callers.
    """
    # Define the model (same as when creating the model file)
    x = tf.placeholder(tf.float32, [None, image_size])
    # NOTE(review): W and b are never used below — leftovers from the
    # single-layer version of this model.
    W = tf.Variable(tf.zeros([image_size, image_labels]))
    b = tf.Variable(tf.zeros([image_labels]))
    # Switches batchnorm between training and inference statistics.
    is_test = tf.placeholder(tf.bool)

    # Model Parameters
    W_conv1 = tf.get_variable("W_conv1", shape=[5, 5, 1, 32],
                              initializer=weight_xavier_init(5 * 5 * 1, 32))
    W_conv2 = tf.get_variable("W_conv2", shape=[5, 5, 32, 64],
                              initializer=weight_xavier_init(5 * 5 * 32, 64))
    W_fc1 = tf.get_variable("W_fc1", shape=[64 * 7 * 7, 1024],
                            initializer=weight_xavier_init(64 * 7 * 7, 1024))
    W_fc2 = tf.get_variable("W_fc2", shape=[1024, image_labels],
                            initializer=weight_xavier_init(1024, image_labels))
    b_conv1 = bias_variable([32])
    b_conv2 = bias_variable([64])
    b_fc1 = bias_variable([1024])
    b_fc2 = bias_variable([image_labels])

    # Reshape the flat pixel rows back into 2-D images (1 channel).
    x_image = tf.reshape(x, [-1, image_width, image_height, 1])

    # conv -> batchnorm -> relu -> maxpool, twice.
    conv1 = conv2d(x_image, W_conv1) + b_conv1
    conv1_bn = batchnorm(conv1, b_conv1, is_test, True)
    h_conv1 = tf.nn.relu(conv1_bn)
    h_pool1 = max_pool_2x2(h_conv1)
    conv2 = conv2d(h_pool1, W_conv2) + b_conv2
    conv2_bn = batchnorm(conv2, b_conv2, is_test, True)
    h_conv2 = tf.nn.relu(conv2_bn)
    h_pool2 = max_pool_2x2(h_conv2)

    # Flatten and run the two fully connected layers.
    h_pool2_flat = tf.reshape(h_pool2, [-1, W_fc1.get_shape().as_list()[0]])
    fc1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
    fc1_bn = batchnorm(fc1, b_fc1, is_test, False)
    h_fc1 = tf.nn.relu(fc1_bn)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    init_op = tf.initialize_all_variables()
    saver = tf.train.Saver()

    """
    Load the my-model file.
    The file is stored in the same directory as this python script is started.
    Use the model to predict the integer. The integers are returned as a list.

    Based on the documentation at
    https://www.tensorflow.org/versions/master/how_tos/variables/index.html
    """
    predicted_lables = np.zeros(test_images.shape[0])
    with tf.Session() as sess:
        sess.run(init_op)
        # NOTE(review): hard-coded absolute Windows checkpoint path.
        saver.restore(sess, "F:\PycharmProject\CNN_mnist_base\model\my-model")
        # print ("Model restored.")
        predict = tf.argmax(y_conv, 1)
        # Predict one image at a time (keep_prob 1.0 disables dropout).
        for i in range(0, test_images.shape[0]):
            imagein = test_images[i]
            predicted_lables[i] = predict.eval(feed_dict={x: [imagein], keep_prob: 1.0, is_test: False}, session=sess)
        # Redundant: the context manager already closes the session.
        sess.close()
    return predicted_lables
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, img):\n return self._predict([img])[0]", "def get_predictions(self, img):\n \n predictions = self.tf_model.predict_proba(img)\n prediction = np.argmax(predictions, axis=-1)\n \n return prediction", "def predict(image):\n with tf.Session(graph=graph...
[ "0.7003327", "0.6524388", "0.6392707", "0.6391232", "0.6360505", "0.63116616", "0.63055545", "0.62496394", "0.6202126", "0.614478", "0.60959804", "0.60379404", "0.6035026", "0.6001941", "0.59648454", "0.59607786", "0.5958487", "0.59524536", "0.59420645", "0.5912935", "0.59101...
0.0
-1
Returns the name the function should have in the Python API, based on the C++ function name. For entry_type 'function', the cpp_name is used unmodified; otherwise everything up to and including the first underscore is stripped, so that only the part after it remains.
def to_py_name(cpp_name, entry_type):
    """Map a C++ function name to its Python API name.

    Free functions keep their C++ name unchanged; for every other entry
    type the part up to and including the first underscore is stripped
    (e.g. "window_close" -> "close"). An underscore must be present.
    """
    if entry_type == 'function':
        return cpp_name
    _prefix, separator, python_name = cpp_name.partition('_')
    assert separator
    return python_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _plugin_funcname(func):\n funcname = func.__name__.rstrip(\"_\")\n if funcname.startswith(\"__\"):\n return funcname + \"__\"\n return funcname", "def wrapper_function_name(text):\n text = GLGenerator.split_to_body_and_ext(text)\n body = text[0]\n ext = text[1]\n ...
[ "0.67665726", "0.6633372", "0.64924026", "0.64650005", "0.6389167", "0.63741195", "0.6252101", "0.62447554", "0.62166333", "0.61651427", "0.61233187", "0.6080773", "0.6080773", "0.60069233", "0.59802777", "0.5970677", "0.5940581", "0.5938672", "0.5932276", "0.5924244", "0.592...
0.84663165
0
Returns the name the property should have in the Python api, based on the C++ struct name.
def property_to_py_name(cpp_struct_name):
    """Map a C++ property-struct name to its Python property name.

    Strips everything up to and including the first underscore, which
    must be present.
    """
    parts = cpp_struct_name.split('_', 1)
    assert len(parts) == 2
    return parts[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PropertyName(self) -> str:", "def property_name(self) -> str:\n return str(self.prop_name)", "def _get_name(x):\r\n if isinstance(x, Property) or isinstance(x, KeyIndex):\r\n return x.name\r\n elif isinstance(x, Edge):\r\n return x.label\r\n ...
[ "0.7078016", "0.6528718", "0.65224946", "0.6521", "0.6296151", "0.6226451", "0.622544", "0.6218055", "0.61775655", "0.615941", "0.6148613", "0.6143576", "0.6141541", "0.61411786", "0.6134688", "0.6088676", "0.6088676", "0.6035033", "0.6035033", "0.6035033", "0.6035033", "0....
0.8263242
0
Remove spurious endlines and such from the docstring specified in the comment preceding the C++/Python functions.
def clean_doc(doc):
    """Normalize a docstring scraped from a C++ comment and wrap it in quotes.

    Literal newlines are mere comment formatting, so they become spaces;
    that substitution can leave an escaped "\\n" with an unintended
    trailing space, which is trimmed. The result is wrapped in double
    quotes, ready for the generated C source.
    """
    flattened = " ".join(doc.split("\n"))
    trimmed = flattened.replace("\\n ", "\\n")
    return '"' + trimmed + '"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_docstring(blob):\n docstring = True\n while docstring == True:\n match_docstring = re.search('\\n\\s*\"\"\"[^\"\"\"]*\"\"\"', blob)\n if not match_docstring:\n docstring = False\n else:\n blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.s...
[ "0.7409109", "0.7175777", "0.71319836", "0.69368535", "0.69084775", "0.67596537", "0.64949363", "0.64829695", "0.6481948", "0.6396861", "0.6369261", "0.63360715", "0.6248399", "0.6155236", "0.61414677", "0.6126157", "0.61031824", "0.6062932", "0.60486543", "0.6028124", "0.600...
0.6236144
13
Determines the Python method type (METH_NOARGS or METH_VARARGS) from the C++ argument list and type of function.
def get_type(args_str, entry_type):
    """Return the Python method type for a C++ argument list.

    C method implementations receive self as their first argument, so a
    method whose C++ signature lists exactly one argument takes none from
    Python (METH_NOARGS); everything else is METH_VARARGS.
    """
    count_meaning_no_args = 1 if entry_type == "method" else 0
    argument_count = len(args_str.split(","))
    if argument_count == count_meaning_no_args:
        return "METH_NOARGS"
    return "METH_VARARGS"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type...
[ "0.61319065", "0.60803515", "0.5884141", "0.5874044", "0.58434784", "0.5799301", "0.56729174", "0.56608915", "0.5459384", "0.54216975", "0.54152423", "0.53767866", "0.53465176", "0.5273557", "0.52425903", "0.51958585", "0.51934904", "0.513849", "0.5122222", "0.5119435", "0.50...
0.6993184
0
Find any comment for including additional method definitions (e.g. handwritten for special cases) inside the generated methoddefs array.
def find_extra_include(file_name):
    """Collect extra-include markers from *file_name*.

    Scans the file for the ``regex.extra_include`` pattern and returns a
    list with one entry per match.

    NOTE(review): ``m.groups(1)`` returns the tuple of *all* groups (with
    1 as default for unmatched ones); if a single path string was
    intended, ``m.group(1)`` may have been meant — confirm with callers.
    """
    with open(file_name) as source:
        contents = source.read()
    return [m.groups(1) for m in re.finditer(regex.extra_include, contents)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __find_methods(self):\r\n while self.__advance():\r\n if self.__tokenizer.token_type() == TYPES_DIC[\"KEYWORD\"] and \\\r\n self.__tokenizer.identifier() == TYPES_DIC[\"METHOD\"]:\r\n self.__advance(n=2)\r\n self.__method_list.append(self.__tok...
[ "0.58155406", "0.5515345", "0.54595226", "0.54332846", "0.5404431", "0.53915465", "0.5368009", "0.5353553", "0.5258822", "0.52563065", "0.5255658", "0.5237434", "0.52109283", "0.520928", "0.5189513", "0.51740175", "0.5138967", "0.51203406", "0.5094661", "0.5094661", "0.507519...
0.0
-1
Uses a more permissive variant of the commentregex used to recognize C++ methods to catch malformed comments. Raises ValueError if anything problematic is discovered.
def check_file(file_name):
    """Sanity-check the method markup comments in *file_name*.

    Uses a lenient variant of the method-comment regex to also catch
    malformed comments, then re-matches strictly to validate each one,
    and verifies that every marked-up function is declared static,
    extern or as a template.

    Returns:
        A list with the text of every well-formed markup comment found.

    Raises:
        ValueError: for malformed markup, an extra quote inside the
            markup, a missing newline after the markup, or a marked-up
            function that is neither static, extern nor a template.
    """
    result = []
    with open(file_name) as f:
        text = f.read()
        for m in re.finditer(regex.function_lenient, text, re.DOTALL|re.MULTILINE):
            # A lenient-only match means the markup is present but broken.
            m2 = re.match(regex.function_strict, m.group(0), re.DOTALL|re.MULTILINE)
            if m2:
                if m2.group(2).find('"') != -1:
                    raise ValueError('Extra quote (") in method markup: %s' % m.group(0))
            else:
                raise ValueError("Invalid method markup: %s" % m.group(0))
            result.append(m.group(0))
            end = m.end(0)
            if not text[end] == '\n':
                raise ValueError("Missing endline after method markup.")
            # Verify that the function is static.
            # This helps ensure that all functions are included in the
            # method-def (otherwise the compiler warns about unused static
            # function).
            #
            # Regular Faint functions aren't made specifically for a methoddef,
            # and may have external linkage (e.g. bitmap draw functions),
            # so allow extern as well, but require stating so explicitly.
            static = text[end+1:].startswith("static")
            extern = text[end+1:].startswith("extern")
            template = text[end+1:].startswith("template<typename T>")
            if not static and not extern and not template:
                rest = text[end+1:]
                methodLine = rest[:rest.find('\n')]
                raise ValueError("Marked up method not static or extern:\n%s " % methodLine[:-1])
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_comments(self, args):\n\n for submission in args.comments:\n if any(char.isalpha() for char in submission[1]) \\\n or self._illegal_chars.search(submission[1]) != None:\n raise ValueError", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n #...
[ "0.656411", "0.628157", "0.6194034", "0.610862", "0.60144717", "0.59669393", "0.59666216", "0.5915157", "0.5909202", "0.58375543", "0.58352786", "0.5832811", "0.5806706", "0.5784541", "0.57556933", "0.57461137", "0.57419854", "0.57343096", "0.57110506", "0.5686666", "0.567892...
0.0
-1
Creates one entry for a PyMethodDef array from the entries for one function (as returned by parse_file).
def to_PyMethodDef_entry(items):
    """Create one PyMethodDef array entry from a parsed function tuple.

    *items* is an entry tuple as produced by parse_file, whose first
    element is the entry type ('method', 'function' or 'method_template');
    the remaining four elements are interpolated into the forwarding
    macro for that entry type.

    Raises:
        AssertionError: if the entry type is unknown.
    """
    entry_type = items[0]
    items = items[1:]
    if entry_type == 'method':
        return 'FORWARDER(%s, %s, "%s", %s)' % items
    elif entry_type == 'function':
        return 'FREE_FORWARDER(%s, %s, "%s", %s)' % items
    elif entry_type == 'method_template':
        return 'FORWARDER(%s<common_type>, %s, "%s", %s)' % items
    else:
        # An explicit raise (unlike the previous bare `assert False`)
        # still fires when running under `python -O`.
        raise AssertionError("Unknown entry type: %r" % (entry_type,))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_PyMethodDef(name, entries, extra_includes):\r\n\r\n methodEntries = [to_PyMethodDef_entry(items) for items in entries]\r\n if name is not None:\r\n methodDef = ('static PyMethodDef %s_methods[] = {\\n ' % name +\r\n ',\\n '.join(methodEntries) + ',\\n ')\r\n else:\r\n me...
[ "0.75998366", "0.5944248", "0.55603445", "0.5401207", "0.5332072", "0.5176276", "0.5170217", "0.5108655", "0.50803465", "0.5051816", "0.50399566", "0.5013269", "0.4962817", "0.4942569", "0.49400118", "0.49136788", "0.4911339", "0.49019086", "0.4900917", "0.48928633", "0.48558...
0.7316116
1
Creates one entry for a PyGetSetDef array from the entries for one propertystruct (as returned by parse_file).
def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):
    """Create one PyGetSetDef array entry for a property struct."""
    return 'PROPERTY_FORWARDER({}, "{}", {})'.format(cpp_struct_name, py_name, doc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_PyGetSetDef(name, entries):\r\n getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]\r\n getSetDef = ('static PyGetSetDef %s_getseters[] = {\\n ' % name +\r\n ',\\n '.join(getSetDefEntries) + ',\\n ')\r\n getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} /...
[ "0.71594757", "0.5288491", "0.50017947", "0.49717405", "0.49567866", "0.49039754", "0.48913658", "0.47898185", "0.47757462", "0.47685832", "0.47616416", "0.47450364", "0.47120082", "0.46812397", "0.46606937", "0.46587437", "0.46313342", "0.46271035", "0.4625974", "0.46091345", ...
0.5890328
1
Creates a string of a CPyGetSetDef array named _getseters, containing all entries in the list (as created by to_PyGetSetDef_entry).
def to_PyGetSetDef(name, entries):
    """Render a complete PyGetSetDef array named <name>_getseters.

    *entries* holds (cpp_struct_name, py_name, doc) tuples; a null
    sentinel row is appended after the generated rows.
    """
    rows = ',\n '.join(to_PyGetSetDef_entry(*entry) for entry in entries)
    header = 'static PyGetSetDef %s_getseters[] = {\n ' % name
    sentinel = '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\n};'
    return header + rows + ',\n ' + sentinel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSets():", "def get_drivers():\n return [str(d) for d in drivers.values()]", "def getset(self, name, value):\r\n return self.format_bulk('GETSET', name, value)", "def get_reader_funcs():\n return READERS", "def get_all(self):\n return [self.get(name) for name in self.factories.ite...
[ "0.5476404", "0.5249653", "0.51922673", "0.51390755", "0.5105949", "0.50751984", "0.5071153", "0.5018218", "0.50118506", "0.5000767", "0.4974314", "0.4955749", "0.49450973", "0.49285832", "0.49245515", "0.49245515", "0.4890076", "0.48870137", "0.48566785", "0.48400316", "0.48...
0.72217596
0
Creates a string of a CPyMethodDef array named _methods, containing all the entries in the list (as created by to_PyMethodDef_entry). Includes any include in the extra_includes list after the regular entries (before the sentinel).
def to_PyMethodDef(name, entries, extra_includes):
    """Render a PyMethodDef array (or a bare entry list when *name* is None).

    Each entry in *entries* becomes one forwarding-macro row; any
    *extra_includes* are emitted after the rows, and — in the named
    variant — a null sentinel row closes the array.
    """
    rows = [to_PyMethodDef_entry(entry) for entry in entries]
    named = name is not None
    if named:
        text = 'static PyMethodDef %s_methods[] = {\n ' % name
        text += ',\n '.join(rows) + ',\n '
    else:
        # Note: the anonymous variant joins without the leading space.
        text = ',\n'.join(rows) + ',\n '
    for include in extra_includes:
        text += '#include "%s"\n' % include
    if named:
        text += '{nullptr,nullptr,0,nullptr} // Sentinel\n};'
    return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_PyMethodDef_entry(items):\r\n\r\n entry_type = items[0]\r\n items = items[1:]\r\n if entry_type == 'method':\r\n return 'FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'function':\r\n return 'FREE_FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'meth...
[ "0.6497351", "0.6201312", "0.59603804", "0.5856507", "0.57363343", "0.5670631", "0.56094706", "0.56094706", "0.5410812", "0.5366973", "0.5323587", "0.52034914", "0.51885706", "0.51738644", "0.5158081", "0.51455", "0.51223594", "0.5052642", "0.5005883", "0.49957657", "0.494654...
0.7831075
0
Writes a PyMethodDef array and/or a PyGetSetDef to the specified file, featuring entries from the entries lists (as returned by parse_file).
def write_result(file_name, name, entries, extra_includes, src_file_names):
    """Write the generated PyMethodDef / PyGetSetDef arrays to *file_name*.

    *entries* is a (methods, properties) pair as returned by parse_file;
    an empty list suppresses the corresponding array.
    """
    methods, properties = entries
    source_label = "this file" if len(src_file_names) < 2 else "these files"
    with open(file_name, 'w', newline='\n') as out:
        out.write('// Generated by %s\n' % os.path.basename(__file__))
        out.write('// Based on %s: %s\n' % (source_label, ", ".join(src_file_names)))
        if methods:
            out.write(to_PyMethodDef(name, methods, extra_includes))
            out.write('\n')
        if properties:
            out.write('\n')
            out.write(to_PyGetSetDef(name, properties))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_PyMethodDef(name, entries, extra_includes):\r\n\r\n methodEntries = [to_PyMethodDef_entry(items) for items in entries]\r\n if name is not None:\r\n methodDef = ('static PyMethodDef %s_methods[] = {\\n ' % name +\r\n ',\\n '.join(methodEntries) + ',\\n ')\r\n else:\r\n me...
[ "0.6717636", "0.5957228", "0.5844461", "0.5257296", "0.52116394", "0.51606745", "0.5049752", "0.49500108", "0.4917279", "0.4909298", "0.48687622", "0.4865209", "0.4790585", "0.47357494", "0.46259487", "0.46080807", "0.45991012", "0.45667055", "0.45524898", "0.4544485", "0.453...
0.5458704
3
Makes the docstring more suitable for html.
def doc_to_html(doc):
    """Convert an extracted docstring to HTML.

    Escaped newlines become <br>, arrows and ellipses become HTML
    entities, doubled backslashes collapse, and the surrounding quotes
    added by clean_doc are stripped. Substitution order matters: the
    escaped-newline replacement must run before the backslash collapse.
    """
    substitutions = (
        ('\\n', '<br>'),
        ('->', '&rarr;'),
        ('...', '&#8230;'),
        ('\\\\', '\\'),
    )
    for old, new in substitutions:
        doc = doc.replace(old, new)
    if doc.startswith('"'):
        doc = doc[1:]
    if doc.endswith('"'):
        doc = doc[:-1]
    return doc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DocString():\n return", "def main_docstring():", "def docstring_hack():\n pass", "def documentation():\n return auto.html()", "def documentation():\n return render_template('help.html')", "def func_doc():", "def __doc__(self, ???):", "def _add_doc(func, doc):\r\n func.__doc__ = doc...
[ "0.7670118", "0.76439434", "0.7628076", "0.7477301", "0.7309437", "0.7032047", "0.6866492", "0.6863824", "0.6863824", "0.6863824", "0.6820424", "0.6789934", "0.67546", "0.67455196", "0.6693798", "0.6692449", "0.66803527", "0.6592285", "0.6549525", "0.65468615", "0.654198", ...
0.0
-1
Writes an HTML file documenting the passed-in methods, using the docstrings (as returned by parse_file).
def write_method_doc(file_name, entries):
    """Write an HTML table documenting the given methods to *file_name*.

    Entries are sorted by Python method name (tuple index 3); the doc
    text (index 4) is converted to HTML.
    """
    with open(file_name, 'w', newline='\n') as out:
        out.write('<table border="0">')
        out.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>')
        for entry in sorted(entries, key=itemgetter(3)):
            row = ('<tr><td valign="top">%s</td><td>%s</td></tr>'
                   % (entry[3], doc_to_html(entry[4])))
            out.write(row)
        out.write('</table>')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n lo...
[ "0.60740525", "0.59743536", "0.59427845", "0.5824418", "0.5816547", "0.57680935", "0.5738121", "0.5711054", "0.56876665", "0.5671265", "0.56643975", "0.56380713", "0.5587306", "0.55807567", "0.5538951", "0.55344105", "0.5531077", "0.5515236", "0.54965585", "0.54924417", "0.54...
0.75820845
0
Writes an HTML file documenting the passed-in properties, using the docstrings (as returned by parse_file). Expects a list of (property-name, docstring) tuples.
def write_property_doc(file_name, entries):
    """Write an HTML table documenting the given properties to *file_name*.

    Does nothing when *entries* is empty. Each entry is a tuple whose
    index 1 is the property name and index 2 its doc string.
    """
    if not entries:
        return
    with open(file_name, 'w', newline='\n') as out:
        out.write('<!-- Generated by %s -->' % os.path.basename(__file__))
        out.write('<table border="0">')
        out.write('<tr><td><b>Property</b></td><td><b>Description</b></td></tr>')
        for entry in entries:
            row = ('<tr><td valign="top">%s</td><td>%s</td></tr>'
                   % (entry[1], doc_to_html(entry[2])))
            out.write(row)
        out.write('</table>')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_to_file(properties,file):\n properties['tempfile']=None\n properties['remove_temp']=True\n properties['outfile']=file", "def write_html(filelist):\n tmp = tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".html\", delete=False)\n\n tmp.write(r\"\"\"<!doctype html>\n <html>\...
[ "0.587501", "0.5714326", "0.55795753", "0.55168897", "0.5469178", "0.54641604", "0.5401127", "0.5393503", "0.538135", "0.5371805", "0.53659755", "0.53104347", "0.52841675", "0.5282833", "0.5242343", "0.51993394", "0.5180886", "0.51743466", "0.51662326", "0.510156", "0.5079105...
0.7246242
0
Generate the Python methoddef header and html documentation for the c++file indicated by src_file_name, by locating "special" Ccomments. The header is saved to dst_file_name and the html documentation to dst_doc_file_name. The name is used for the PyMethodDef and PyGetSetDef.
def generate(src_file_names, dst_file_name, dst_doc_file_name, dst_property_doc_file_name, name):
    """Generate the method-def header and HTML doc tables for the sources.

    Validates and parses every source file, then writes the header to
    *dst_file_name* and the documentation tables to the two doc paths.
    Exits the process when no entries at all were found.
    """
    methods, properties, extra_includes = [], [], []
    for src_file_name in src_file_names:
        check_file(src_file_name)
        found_methods, found_properties = parse_file(src_file_name)
        methods.extend(found_methods)
        properties.extend(found_properties)
        extra_includes.extend(find_extra_include(src_file_name))
    if not methods and not properties:
        # As before, the message names only the last source file.
        print("No entries found in %s." % src_file_name)
        exit(1)
    entries = (methods, properties)
    write_result(dst_file_name, name, entries, extra_includes, src_file_names)
    write_method_doc(dst_doc_file_name, methods)
    write_property_doc(dst_property_doc_file_name, properties)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (...
[ "0.67592466", "0.58502406", "0.58490044", "0.5828653", "0.5795212", "0.5750393", "0.57325697", "0.5689374", "0.5534153", "0.5486456", "0.5484368", "0.54745", "0.546431", "0.54593277", "0.5433575", "0.5428016", "0.5416794", "0.5406283", "0.537415", "0.53738326", "0.53385717", ...
0.6063957
1
Generate headers with a Python methoddef array and html documentation tables for the listed source files.
def generate_headers(src_files, out_root, doc_root):
    """Generate method-def headers and doc tables for the listed sources.

    *src_files* is a sequence of (name, files) pairs where *files* is a
    single path or a sequence of paths; the first path determines the
    generated file names. Headers go to *out_root*, documentation tables
    to *doc_root*. Regeneration is skipped for sources whose header is
    up to date (per util.changed).
    """
    if not os.path.exists(out_root):
        os.makedirs(out_root)
    did_print_heading = False
    changed = False
    for (name, files) in src_files:
        # Accept a bare path as shorthand for a one-element sequence.
        if files.__class__ == str:
            src = files
            files = (src,)
        else:
            src = files[0]
        # <out_root>/<basename>-method-def.hh
        dst = src.replace(".hh", "-method-def.hh")
        dst = dst.replace(".cpp", "-method-def.hh")
        dst = os.path.join(out_root, os.path.split(dst)[1])
        # <doc_root>/<basename>-methods.txt
        dst_doc = src.replace(".hh", '-methods.txt')
        dst_doc = dst_doc.replace(".cpp", '-methods.txt')
        dst_doc_filename = os.path.split(dst_doc)[1]
        dst_doc_filename = os.path.join(doc_root, dst_doc_filename)
        # <doc_root>/<basename>-properties.txt
        # NOTE(review): only ".cpp" is mapped here, unlike the two paths
        # above which also map ".hh" — confirm whether that is intended.
        dst_prop_doc = src.replace(".cpp", '-properties.txt')
        dst_doc_prop_filename = os.path.split(dst_prop_doc)[1]
        dst_doc_prop_filename = os.path.join(doc_root, dst_doc_prop_filename)
        # Only the first source file is compared against the header.
        if util.changed(src, dst):
            if not did_print_heading:
                print("* Generating Python method definitions.")
                did_print_heading = True
            generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)
            changed = True
    if not changed:
        print("* Python method definitions up to date.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_included_function_list_readme():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n rtd_link = '`{name} <http://iteration-uti...
[ "0.6388133", "0.63291883", "0.6162227", "0.61605513", "0.605548", "0.5960463", "0.5946018", "0.58367574", "0.582316", "0.58196837", "0.58120084", "0.5807799", "0.5753701", "0.57469726", "0.57268125", "0.5716415", "0.5679393", "0.56756175", "0.5639785", "0.5602997", "0.5587299...
0.77620685
0
Fetch all the teams that are participating in a league.
def crawl_competition(cls, competition: Competition, follow_pagination: bool = False) -> int:
    """Crawl the fixtures URL of *competition* and return the fixture count.

    Args:
        competition: the competition whose fixtures URL is crawled.
        follow_pagination: when True, also crawl the paginated
            follow-up pages via cls.follow_pagination.

    Returns:
        The number of fixtures crawled; 0 when crawling failed (errors
        are logged and reported to Rollbar rather than re-raised).
    """
    count = 0
    logger = CrawlerLogger.get_logger_for_class(cls)
    url = competition.get_fixtures_url()
    try:
        logger.log("crawl per league {}".format(url))
        count = count + cls.crawl_by_url(competition, url)
        if follow_pagination:
            count = count + cls.follow_pagination(competition, url)
    except Exception as e:
        # Deliberate best-effort: a failed crawl must not abort the run.
        logger.error(e)
        rollbar.report_exc_info(sys.exc_info())
    # TODO: statistics
    # if lock:
    #     with lock:
    #         self.statistics['fixtures'] += count
    # else:
    #     self.statistics['fixtures'] += count
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_teams(self):\n return self._db.Teams.find({})", "def get_teams():", "def getAllTeams(self):\n return []", "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def get_all_matches_by_league(self):\n raise...
[ "0.73225325", "0.7137867", "0.70343244", "0.6934072", "0.69133794", "0.68307376", "0.6677932", "0.6623947", "0.659409", "0.6592349", "0.6555678", "0.6542481", "0.64531434", "0.6439132", "0.6427429", "0.6416407", "0.6407396", "0.64073485", "0.6385755", "0.6368741", "0.6295079"...
0.0
-1
Construct an instance of ``client_class`` and register it under given alias.
def create_connection(self, alias='async', client_class=AsyncElasticsearch, **kwargs):
    """Construct a *client_class* instance, register it under *alias*,
    and return it.

    The module-level serializer is used unless the caller supplies one
    in **kwargs.
    """
    if 'serializer' not in kwargs:
        kwargs['serializer'] = serializer
    client = client_class(**kwargs)
    self._conns[alias] = client
    return client
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_client(self, client, client_name):\n self.clients[client_name] = client", "def create_client(self, module_name, version, client_class):\n # NOTE(kiennt): Get created client rather create a new one.\n # The key is the combination of module_name and version.\n ...
[ "0.5862595", "0.57551134", "0.57218677", "0.56946874", "0.56426543", "0.5604221", "0.5513979", "0.5479135", "0.5450147", "0.54027754", "0.53822577", "0.5351544", "0.5310474", "0.53071845", "0.5302867", "0.5245894", "0.52256596", "0.5221313", "0.52094626", "0.52094626", "0.520...
0.64954954
0
Return the offset in hours from UTC as a float.
def get_tz(self):
    """Return the UTC offset in hours from the first TIMEZONE element.

    Parses self.source incrementally; a missing or empty text value is
    treated as 0.0. Returns None when no TIMEZONE element is found.
    """
    for _event, node in etree.iterparse(self.source):
        if node.tag != TIMEZONE:
            continue
        return float(node.text or 0.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tz_offset(self) -> float:\n return self.AD.tz.utcoffset(self.datetime()).total_seconds() / 60", "def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()", "def get_tz_offset_seconds() -...
[ "0.70775425", "0.6989547", "0.6908614", "0.67074597", "0.6508781", "0.64082044", "0.6225916", "0.6165682", "0.61237943", "0.6042478", "0.6030816", "0.59795976", "0.58823085", "0.5826774", "0.57685786", "0.5765773", "0.57652164", "0.5705759", "0.5696149", "0.5687313", "0.56625...
0.4868406
78
Localize tz-naive TimeSeries to a fixed-offset time zone. Most time zones are offset from UTC by a whole number of hours, but a few are offset by 30 or 45 minutes. An offset of 0 is special-cased to return UTC.
def tz_localize(dataframe, offset_in_hours=0, copy=True):
    """Localize a tz-naive pandas object to a fixed-offset time zone.

    The offset is given in hours (it may be fractional for the 30- or
    45-minute zones) and is converted to the minutes FixedOffset expects.
    """
    offset_in_minutes = offset_in_hours * 60
    fixed_zone = FixedOffset(offset_in_minutes)
    return dataframe.tz_localize(fixed_zone, copy=copy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fixed_timezone(offset):\n\n if isinstance(offset, timedelta):\n offset = offset.total_seconds() // 60\n\n sign = \"-\" if offset < 0 else \"+\"\n hhmm = \"%02d%02d\" % divmod(abs(offset), 60)\n name = sign + hhmm\n\n return timezone(timedelta(minutes=offset), name)", "def get_fixed_...
[ "0.733858", "0.73315036", "0.7123728", "0.6075806", "0.5935274", "0.58467084", "0.5763584", "0.5721623", "0.5694093", "0.56887317", "0.56338304", "0.5555357", "0.555062", "0.55430114", "0.5529573", "0.5487007", "0.54707557", "0.54601884", "0.5453896", "0.5438302", "0.5430976"...
0.64824516
3
Convert sequence of evenly spaced nonnegative ints to equivalent slice. If the returned slice object is `s = slice(start, stop, step)`, the
def _range_to_slice(index): if not len(index): return slice(None, 0, None) if any(i < 0 for i in index): raise ValueError(f'Could not convert {index} to a slice ' '(contains negative elements)') increments_left = set(np.diff(index)) step = increments_left.pop() if increments_left else 1 if step == 0 or increments_left: raise ValueError(f'Could not convert {index} to a slice ' '(unevenly spaced or zero increments)') start = index[0] stop = index[-1] + step # Avoid descending below 0 and thereby wrapping back to the top return slice(start, stop if stop >= 0 else None, step)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]", "def normalize_slice(s):\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n ...
[ "0.8011135", "0.7739454", "0.74094546", "0.72735506", "0.717713", "0.70792943", "0.7041868", "0.690513", "0.6796296", "0.6779164", "0.6697571", "0.6686825", "0.66782004", "0.66213644", "0.6585468", "0.6580096", "0.647419", "0.6415965", "0.64123", "0.63498336", "0.63340086", ...
0.7165115
5
Generate an equivalent index expression that is cheaper to evaluate. Advanced ("fancy") indexing using arrays/lists of booleans or ints is much slower than basic indexing using slices and scalar ints in dask. If the fancy index on a specific axis/dimension selects a range with a fixed (nonzero) step size between indices, however, it can be converted into an equivalent slice to get a simple index instead. Note that when indexing along multiple axes with arrays, this may change the semantics of the indexing (see NumPy's `NEP 21`_ for details). This simplification is only guaranteed to be safe when used with outer indexing.
def _simplify_index(indices, shape):
    """Rewrite *indices* so 1-D fancy indices become slices where possible.

    Basic indexing (slices / scalar ints) is much cheaper than fancy
    indexing in dask, so each 1-D integer ndarray whose values form an
    evenly spaced range is replaced by the equivalent (normalized) slice.

    NOTE(review): with several fancy indices this implies outer-indexing
    semantics — only safe for oindex-style callers (see NEP 21).
    """
    # First clean up and check indices, unpacking ellipsis and boolean arrays
    indices = da.slicing.normalize_index(indices, shape)
    out = []
    axis = 0
    for index in indices:
        # np.newaxis entries do not consume an axis of *shape*.
        if index is not np.newaxis:
            length = shape[axis]
            axis += 1
            # If there is 1-D fancy index on this axis, try to convert to slice
            if isinstance(index, np.ndarray) and index.ndim == 1:
                try:
                    index = _range_to_slice(index)
                except ValueError:
                    # Not an evenly spaced range - keep the fancy index.
                    pass
                else:
                    index = da.slicing.normalize_slice(index, length)
        out.append(index)
    return tuple(out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self, arr, idx, temp = True, name = None):\n \n temp = temp or name is not None\n \n arr_t = arr.type\n\n if isinstance(arr_t, ScalarT):\n # even though it's not correct externally, it's\n # often more convenient to treat indexing\n # into scalars as the identity function.\n...
[ "0.658317", "0.6367172", "0.6293228", "0.629157", "0.62566864", "0.60536855", "0.60536855", "0.60373294", "0.5884561", "0.58782953", "0.58534575", "0.58327305", "0.5827532", "0.5753103", "0.5751057", "0.5711585", "0.5674995", "0.5662051", "0.56028384", "0.5559749", "0.5525", ...
0.6160974
5
Perform outer indexing on dask array `x`, one dimension at a time. It is assumed that `indices` is suitably normalised (no ellipsis, etc.)
def _dask_oindex(x, indices): axis = 0 for index in indices: x = da.take(x, index, axis=axis) # If axis wasn't dropped by a scalar index: if not isinstance(index, Integral): axis += 1 return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dask_getitem(x, indices):\n indices = _simplify_index(indices, x.shape)\n try:\n out = x[indices]\n except NotImplementedError:\n out = _dask_oindex(x, indices)\n # dask does culling anyway as part of optimization, but it first calls\n # ensure_dict, which copies all the keys, pres...
[ "0.6630981", "0.65513384", "0.625214", "0.60793054", "0.5899021", "0.58930767", "0.584284", "0.58320093", "0.57988596", "0.57887155", "0.57549566", "0.5752462", "0.57381105", "0.5714157", "0.5673157", "0.5671463", "0.56660175", "0.56258434", "0.56149113", "0.5592513", "0.5555...
0.75664073
0
Index a dask array, with ND fancy index support and better performance. This is a dropin replacement for ``x[indices]`` that goes one further by implementing "ND fancy indexing" which is still unsupported in dask. If `indices` contains multiple fancy indices, perform outer (`oindex`) indexing. This behaviour deviates from NumPy, which performs the more general (but also more obtuse) vectorized (`vindex`) indexing in this case. See NumPy `NEP 21`_, `dask 433`_ and `h5py 652`_ for more details.
def dask_getitem(x, indices): indices = _simplify_index(indices, x.shape) try: out = x[indices] except NotImplementedError: out = _dask_oindex(x, indices) # dask does culling anyway as part of optimization, but it first calls # ensure_dict, which copies all the keys, presumably to speed up the # case where most keys are retained. A lazy indexer is normally used to # fetch a small part of the data. if np.product(out.numblocks) < 0.5 * np.product(x.numblocks): dsk = dask.optimization.cull(out.dask, out.__dask_keys__())[0] out.dask = dask.highlevelgraph.HighLevelGraph.from_collections(out.name, dsk) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _dask_oindex(x, indices):\n axis = 0\n for index in indices:\n x = da.take(x, index, axis=axis)\n # If axis wasn't dropped by a scalar index:\n if not isinstance(index, Integral):\n axis += 1\n return x", "def array_array_index(array, indices):\n if indices.shape[1...
[ "0.7357183", "0.70139194", "0.6808042", "0.678235", "0.65021425", "0.6417131", "0.6361745", "0.6331414", "0.6304895", "0.6261165", "0.60468435", "0.5991131", "0.5980531", "0.5952801", "0.5908524", "0.58395267", "0.5770695", "0.57215935", "0.5679449", "0.56439877", "0.564334",...
0.6581825
4
Determine appropriate name for callable `f` (akin to function name).
def _callable_name(f): try: return f.__name__ except AttributeError: if isinstance(f, partial): return f.func.__name__ return f.__class__.__name__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n return func.__name__", "def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)", "def name_func(func, num, params):...
[ "0.72435784", "0.68455863", "0.67366433", "0.6668289", "0.6594174", "0.6500698", "0.6486582", "0.6452623", "0.6450915", "0.64476234", "0.640476", "0.640476", "0.63907254", "0.63632125", "0.63278747", "0.6300249", "0.6296908", "0.6286463", "0.62828344", "0.6204712", "0.6204712...
0.8215907
0
Short humanfriendly string representation of lazy transform object.
def __repr__(self): class_name = self.__class__.__name__ dtype = 'unchanged' if self.dtype is None else self.dtype return f"<katdal.{class_name} '{self.name}': type '{dtype}' at {id(self):#x}>"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n names = [self.name]\n names += [_callable_name(transform) for transform in self.transforms]\n return ' | '.join(names) + f' -> {self.shape} {self.dtype}'", "def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n ...
[ "0.68118584", "0.6719526", "0.6454018", "0.6439769", "0.62236834", "0.62182367", "0.6107003", "0.60770506", "0.6071675", "0.6052423", "0.6029779", "0.5984073", "0.5969814", "0.5924413", "0.59127647", "0.5897758", "0.5897036", "0.58890736", "0.5888635", "0.586922", "0.5840094"...
0.0
-1
Transform data (`keep` is userspecified secondstage index).
def __call__(self, data, keep): return self.transform(data, keep)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self, dataframe: DataFrame) -> DataFrame:", "def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")...
[ "0.55106413", "0.54800296", "0.53775775", "0.5366347", "0.5208422", "0.52055204", "0.51941574", "0.5188958", "0.5141277", "0.51131773", "0.5106497", "0.510201", "0.50470954", "0.5045959", "0.502488", "0.49978232", "0.49935693", "0.4965403", "0.49543115", "0.4952945", "0.49435...
0.5873317
0
Short humanfriendly string representation of lazy indexer object.
def __repr__(self): return "<katdal.{} '{}': shape {}, type {} at {:#x}>".format( self.__class__.__name__, self.name, self.shape, self.dtype, id(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return \"{}_human\".format(self.index)", "def __str__(self) -> str:\n return f\"K{self._index_to_unicode(self.index)}\"", "def __repr__(self):\n return str(self.index)", "def __repr__(self):\n return \"%s[%s]\" % (self.index_type, self.query)", "def short_st...
[ "0.6915522", "0.68598956", "0.6718207", "0.6625014", "0.6598968", "0.65907544", "0.6485251", "0.64300734", "0.6380875", "0.62672544", "0.62349004", "0.6124259", "0.609269", "0.60701317", "0.60654444", "0.6030815", "0.60238296", "0.59876156", "0.59537524", "0.5930237", "0.5860...
0.0
-1
Helper function to create strings for display (limits dtype length).
def _name_shape_dtype(self, name, shape, dtype): dtype_str = (str(dtype)[:50] + '...') if len(str(dtype)) > 50 else str(dtype) return f"{name} -> {shape} {dtype_str}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n largest_element = max(self)\n length = int(log10(largest_element)) + 1\n\n s = '\\n'.join([' '.join([\n f\"{elem:>{length}}\" for elem in row])\n for row in self.data])\n return s + '\\n'", "def __str__(self):\n if self.defau...
[ "0.66722065", "0.6646886", "0.66436577", "0.6635707", "0.6561914", "0.6553865", "0.64629567", "0.6437168", "0.6394366", "0.63221836", "0.6237532", "0.62296236", "0.6201392", "0.61959225", "0.61572546", "0.615381", "0.61407906", "0.612819", "0.61144656", "0.610394", "0.6093792...
0.5689976
93
Verbose humanfriendly string representation of lazy indexer object.
def __str__(self): shape, dtype = self._initial_shape, self._initial_dtype descr = [self._name_shape_dtype(self.name, shape, dtype)] for transform in self.transforms: shape, dtype = transform.new_shape(shape), transform.dtype if transform.dtype is not None else dtype descr += ['-> ' + self._name_shape_dtype(transform.name, shape, dtype)] return '\n'.join(descr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return str(self.index)", "def __str__(self):\n return \"{}_human\".format(self.index)", "def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.indices})\"", "def __repr__(self):\n return \"%s[%s]\" % (self.index_type, self.query)", "def get...
[ "0.67205507", "0.67017853", "0.6618106", "0.65178007", "0.6516238", "0.6503144", "0.62764376", "0.6239224", "0.6238777", "0.6221722", "0.6112763", "0.6100795", "0.6047101", "0.6037867", "0.6026874", "0.6009232", "0.6002592", "0.59721404", "0.5880291", "0.58769834", "0.5845483...
0.0
-1
Extract a selected array from the underlying dataset. This applies the given secondstage index on top of the firststage index and retrieves the relevant data from the dataset as an array, optionally transforming it afterwards.
def __getitem__(self, keep): ndim = len(self.dataset.shape) # Ensure that keep is a tuple (then turn it into a list to simplify further processing) keep = list(keep) if isinstance(keep, tuple) else [keep] # The original keep tuple will be passed to data transform chain original_keep = tuple(keep) # Ensure that keep is same length as data dimension (truncate or pad with blanket slices as necessary) keep = keep[:ndim] + [slice(None)] * (ndim - len(keep)) # Map current selection to original data indices based on any existing initial selection, per data dimension keep = [(dkeep if dlookup is None else dlookup[dkeep]) for dkeep, dlookup in zip(keep, self._lookup)] # Iterate over dimensions of dataset, storing information on selection on each dimension: # `selection` is a list with one element per dimension; each element is a list of contiguous segments along # the dimension, and each segment is represented by a tuple of 3 elements: # (dataset selection, post-selection, output array selection) # Similarly, `segment_sizes` is a list of lists of segment lengths (empty lists for scalar-selected dimensions) selection, segment_sizes = [], [] for dim_keep, dim_len in zip(keep, self.dataset.shape): if np.isscalar(dim_keep): # If selection is a scalar, pass directly to dataset selector and remove dimension from output selection.append([(dim_keep, None, None)]) segment_sizes.append([]) elif isinstance(dim_keep, slice): # If selection is a slice, pass directly to dataset selector without post-selection start, stop, stride = dim_keep.indices(dim_len) segm_size = len(range(start, stop, stride)) selection.append([(slice(start, stop, stride), slice(None), slice(0, segm_size, 1))]) segment_sizes.append([segm_size]) elif len(dim_keep) == 0: # If selection is empty, pass to post-selector, as HDF5 datasets do not support zero-length selection selection.append([(slice(0, 1, 1), slice(0, 0, 1), slice(0, 0, 1))]) segment_sizes.append([0]) else: # Anything else is advanced indexing via bool 
or integer sequences dim_keep = np.atleast_1d(dim_keep) # Turn boolean mask into integer indices (True means keep that index) if dim_keep.dtype == bool and len(dim_keep) == dim_len: dim_keep = np.nonzero(dim_keep)[0] elif not np.all(dim_keep == np.unique(dim_keep)): raise TypeError('LazyIndexer cannot handle duplicate or unsorted advanced integer indices') # Split indices into multiple contiguous segments (specified by first and one-past-last data indices) jumps = np.nonzero(np.diff(dim_keep) > 1)[0] first = [dim_keep[0]] + dim_keep[jumps + 1].tolist() last = dim_keep[jumps].tolist() + [dim_keep[-1]] segments = np.c_[first, np.array(last) + 1] if len(dim_keep) > 0.2 * dim_len and len(segments) > 1: # If more than 20% of data are selected in 2 or more separate segments (the Ratcliffian benchmark), # select data at dataset level with a single slice spanning segments and then postselect the ndarray selection.append([(slice(segments[0, 0], segments[-1, 1], 1), dim_keep - dim_keep[0], slice(0, len(dim_keep), 1))]) segment_sizes.append([len(dim_keep)]) else: # Turn each segment into a separate slice at dataset level without post-selection, # and construct contiguous output slices of the same segment sizes segm_sizes = [end - start for start, end in segments] segm_starts = np.cumsum([0] + segm_sizes) selection.append([(slice(start, end, 1), slice(None), slice(segm_starts[n], segm_starts[n + 1], 1)) for n, (start, end) in enumerate(segments)]) segment_sizes.append(segm_sizes) # Short-circuit the selection if all dimensions are selected with scalars (resulting in a scalar output) if segment_sizes == [[]] * ndim: out_data = self.dataset[tuple([select[0][0] for select in selection])] else: # Use dense N-dimensional meshgrid to slice data set into chunks, based on segments along each dimension chunk_indices = np.mgrid[[slice(0, len(select), 1) for select in selection]] # Pre-allocate output ndarray to have the correct shape and dtype (will be at least 1-dimensional) out_data = 
np.empty([np.sum(segments) for segments in segment_sizes if segments], dtype=self.dataset.dtype) # Iterate over chunks, extracting them from dataset and inserting them into the right spot in output array for chunk_index in chunk_indices.reshape(ndim, -1).T: # Extract chunk from dataset (don't use any advanced indexing here, only scalars and slices) dataset_select = tuple([select[segment][0] for select, segment in zip(selection, chunk_index)]) chunk = self.dataset[dataset_select] # Perform post-selection on chunk (can be fancier / advanced indexing because chunk is now an ndarray) post_select = [select[segment][1] for select, segment in zip(selection, chunk_index)] # If any dimensions were dropped due to scalar indexing, drop them from post_select/out_select tuples post_select = tuple([select for select in post_select if select is not None]) # Do post-selection one dimension at a time, as ndarray does not allow simultaneous advanced indexing # on more than one dimension. This caters for the scenario where more than one dimension satisfies # the Ratcliffian benchmark (the only way to get advanced post-selection). for dim in range(len(chunk.shape)): # Only do post-selection on this dimension if non-trivial (otherwise an unnecessary copy happens) if not (isinstance(post_select[dim], slice) and post_select[dim] == slice(None)): # Prepend the appropriate number of colons to the selection to place it at correct dimension chunk = chunk[[slice(None)] * dim + [post_select[dim]]] # Determine appropriate output selection and insert chunk into output array out_select = [select[segment][2] for select, segment in zip(selection, chunk_index)] out_select = tuple([select for select in out_select if select is not None]) out_data[out_select] = chunk # Apply transform chain to output data, if any return reduce(lambda data, transform: transform(data, original_keep), self.transforms, out_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(arrays, index):\n if arrays is None or any(i is None for i in arrays):\n return arrays\n return tuple(i.ravel()[index] for i in arrays)", "def __getitem__(self, index: int) -> Tuple[Any, Any]:\n datum, target = self.data[index], self.targets[index]\n \n if self.transf...
[ "0.5978752", "0.57524294", "0.5708342", "0.55696416", "0.556528", "0.5534159", "0.55027837", "0.54934233", "0.54812884", "0.54775065", "0.54741627", "0.54486006", "0.5447731", "0.54420215", "0.54378104", "0.5379849", "0.537284", "0.5363644", "0.5344681", "0.5316661", "0.53088...
0.5002727
73
Transformations that are applied after firststage indexing.
def transforms(self): return self._transforms
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform():", "def _apply_transform(self):\n pass", "def transform():\n pass", "def transform(self):", "def reconstruct_input(self, ix):", "def _calculate_transforms(self):\n\n self._logger.info(\"Generating transformations.\")\n\n # Calculate partial transforms - get par...
[ "0.64521396", "0.6436142", "0.625093", "0.60931164", "0.59010315", "0.5887653", "0.5802023", "0.5802023", "0.5802023", "0.5802023", "0.5802023", "0.5802023", "0.5802023", "0.5789174", "0.5749093", "0.57337826", "0.57290673", "0.5708371", "0.5691743", "0.56447583", "0.5632075"...
0.0
-1
Array after firststage indexing and transformation.
def dataset(self): with self._lock: if self._dataset is None: if isinstance(self._orig_dataset, DaskLazyIndexer): self._orig_dataset = self._orig_dataset.dataset dataset = dask_getitem(self._orig_dataset, self.keep) for transform in self.transforms: dataset = transform(dataset) self._dataset = dataset self._orig_dataset = None return self._dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def array(self):", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def __array__(self):\n return self.array", "def __array__(self):\n return np.asarray(self.data)", "def transform(self, x: Array2D) -> Array2D:", "def __call__(self):\n return self.array"...
[ "0.7165916", "0.6688537", "0.65816486", "0.6539245", "0.6529321", "0.65116245", "0.6501995", "0.6398836", "0.6352268", "0.63407344", "0.6338532", "0.62778264", "0.6249261", "0.6225504", "0.62233794", "0.62099195", "0.6193239", "0.618922", "0.61748564", "0.61565554", "0.615107...
0.0
-1
Extract a selected array from the underlying dataset. This applies the given secondstage index on top of the current dataset, which already has a firststage index and optional transforms applied to it. The indexer also finally stops being lazy and triggers dask computation to arrive at the output array. Both indexing stages perform "outer" indexing (aka oindex), which indexes each dimension independently. This is especially relevant for advanced or fancy indexing.
def __getitem__(self, keep): return self.get([self], keep)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index)", "def c...
[ "0.6476484", "0.61325866", "0.5939623", "0.5834367", "0.5815109", "0.5787023", "0.578326", "0.5776111", "0.56746083", "0.563971", "0.5628474", "0.5548305", "0.5501211", "0.5498332", "0.5483299", "0.54740727", "0.54704463", "0.5469028", "0.54643875", "0.54544646", "0.54269475"...
0.0
-1
Extract several arrays from the underlying dataset.
def get(cls, arrays, keep, out=None): kept = [dask_getitem(array.dataset, keep) for array in arrays] # Workaround for https://github.com/dask/dask/issues/7187 # This is equivalent to da.compute(kept), but does not allocate # excessive memory and is potentially faster. if out is None: out = [np.empty(array.shape, array.dtype) for array in kept] da.store(kept, out, lock=False) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_iter_data(dataset):\n num_samples = dataset.num_examples\n\n handle = dataset.open()\n features = []\n targets = []\n for i in xrange(num_samples):\n data = dataset.get_data(handle)\n features.append(data[0])\n targets.append(data[1])\n\n dataset.close(handle)\n\n ...
[ "0.6656692", "0.66058266", "0.65474606", "0.65298635", "0.6464468", "0.64520764", "0.6305162", "0.624804", "0.62314004", "0.6222413", "0.62146145", "0.6206641", "0.61618036", "0.6148572", "0.61412054", "0.60787904", "0.6073376", "0.6064661", "0.6064661", "0.6059199", "0.60515...
0.0
-1
Short humanfriendly string representation of indexer object.
def __repr__(self): return "<katdal.{} '{}': shape {}, type {} at {:#x}>".format( self.__class__.__name__, self.name, self.shape, self.dtype, id(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self) -> str:\n return f\"K{self._index_to_unicode(self.index)}\"", "def __str__(self):\n return \"{}_human\".format(self.index)", "def __repr__(self):\n return \"%s[%s]\" % (self.index_type, self.query)", "def __repr__(self):\n return str(self.index)", "def __repr__...
[ "0.7194278", "0.7162457", "0.711871", "0.69272774", "0.6822385", "0.6694754", "0.6636655", "0.6632189", "0.66048366", "0.6591673", "0.65751594", "0.65524185", "0.640755", "0.63549006", "0.62816143", "0.61983436", "0.6137607", "0.6099416", "0.60898465", "0.6058687", "0.6045976...
0.0
-1
Verbose humanfriendly string representation of indexer object.
def __str__(self): names = [self.name] names += [_callable_name(transform) for transform in self.transforms] return ' | '.join(names) + f' -> {self.shape} {self.dtype}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return \"%s[%s]\" % (self.index_type, self.query)", "def __str__(self):\n return \"{}_human\".format(self.index)", "def __repr__(self):\n return str(self.index)", "def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.indices})\"", "def __s...
[ "0.7050152", "0.70169646", "0.7002474", "0.6923666", "0.6894936", "0.67668396", "0.66782624", "0.6649045", "0.6633035", "0.65953916", "0.6495072", "0.639294", "0.63205606", "0.63171315", "0.63001937", "0.6277957", "0.6208957", "0.6134051", "0.608598", "0.6084308", "0.6039816"...
0.0
-1
Shape of array after firststage indexing and transformation.
def shape(self): return self.dataset.shape
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shape(self):\n return self.array.shape", "def shape(self):\n return self.to_array().shape", "def shape(self):\n return self.X.shape", "def shape(self):\n return self[0].shape", "def shape(self,squeeze=True):\n return np.shape(self.getData(squeeze=squeeze))", "def sh...
[ "0.74618185", "0.72965395", "0.72233784", "0.7130862", "0.71119326", "0.70900625", "0.697013", "0.6945931", "0.6945931", "0.6945931", "0.6945931", "0.6945931", "0.69400984", "0.68968654", "0.68968654", "0.68957853", "0.68937385", "0.68937385", "0.68937385", "0.684294", "0.683...
0.66548723
26
Data type of array after firststage indexing and transformation.
def dtype(self): return self.dataset.dtype
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datatype_name(self):\n return 'array'", "def type_array():\n return []", "def get_data_type(self, idx):\n return(self.data[idx].dtype)", "def astype(self, dtype):\n return NoneArray", "def dtype(self):\n return self.array.dtype", "def test_hk_getdata_field_array_typ...
[ "0.7009306", "0.68862", "0.6857213", "0.6660171", "0.6584545", "0.6563583", "0.6512989", "0.6492781", "0.64653784", "0.64411944", "0.6405585", "0.64019823", "0.63657856", "0.63524574", "0.6346064", "0.63260984", "0.6315588", "0.6220689", "0.6218643", "0.62170565", "0.6210315"...
0.62220097
17
Save the post data when adding a new item.
def perform_create(self, serializer): serializer.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _store(self):\n self._post_item.save()\n self._attachment_item.save()\n self._marshaller.marshall(self._post_item)", "def post(self, item):\n\n db.session.add(item)\n\n return item", "def post(self):\n data = request.json\n return save_new_post(data=data)", ...
[ "0.73508483", "0.7192973", "0.71141416", "0.6993671", "0.6968998", "0.6730203", "0.6696825", "0.66588724", "0.66392076", "0.6593812", "0.65399164", "0.65288574", "0.64822817", "0.64620656", "0.645195", "0.6377771", "0.63727283", "0.636567", "0.63141656", "0.6302436", "0.62708...
0.0
-1
Initialises the ``InputDevice`` object and starts ``pifacecad.SwitchEventListener``. Also, registers callbacks to ``press_key`` method.
def __init__(self): self.cad = pifacecad.PiFaceCAD() self.listener = pifacecad.SwitchEventListener(chip=self.cad) for i in range(8): self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key) self.listener.activate() atexit.register(self.atexit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()", "def setInput(self):\n gpio.setup(self.bcm_id, gpio.IN, pull_up_down=self.pull)\n self.mode = gpio.IN", "def startCallback (self):\n if self.hasCallback:\n return\n...
[ "0.6211504", "0.6013261", "0.5729392", "0.56921124", "0.5665231", "0.5625194", "0.5592315", "0.5584637", "0.5559815", "0.5458288", "0.54401493", "0.53518695", "0.5323422", "0.5239154", "0.5230533", "0.5202559", "0.51961684", "0.5169283", "0.5162053", "0.51603216", "0.5147439"...
0.6555623
0
Does nothing as it's not easy to stop SwitchEventListener and be able to start it afterwards. Sets a flag for press_key, though.
def start(self): self.active = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_press(key):\n try:\n # gets pressed key char value and searches it from dict with get method.\n mapped_key = key_mappings.get(key.char) # gets value and type tuple or None\n if mapped_key:\n module.pressed_key = mapped_key\n except AttributeError:\n traceback.pr...
[ "0.6753262", "0.6747262", "0.6747262", "0.6744969", "0.652294", "0.6501145", "0.6453141", "0.64297837", "0.63286453", "0.6319466", "0.63019174", "0.6213256", "0.62122375", "0.6172587", "0.6166433", "0.6166433", "0.6082926", "0.60826945", "0.60654783", "0.606481", "0.6039859",...
0.0
-1
Does nothing as it's not easy to stop SwitchEventListener and be able to start it afterwards. Unsets a flag for press_key, though.
def stop(self): self.active = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_press(self):\n self.pressed = True", "def on_press(self):\n self.pressed = True", "def on_press(key):\n global key_pressed\n try:\n if key == keyboard.Key.enter:\n key_pressed = True\n # Stop listener\n return False\n except AttributeError:\...
[ "0.66097456", "0.66097456", "0.6510256", "0.65034145", "0.6415514", "0.6415514", "0.6330303", "0.6317401", "0.63108677", "0.6278477", "0.6265347", "0.61935896", "0.61799365", "0.6156864", "0.6116449", "0.61161727", "0.6018095", "0.6011536", "0.5990823", "0.5985273", "0.596756...
0.0
-1
Converts event numbers to keycodes using ``mapping`` and sends them to ``send_key``. Is a callback for ``SwitchEventListener``.
def press_key(self, event): if self.active: keycode = self.mapping[event.pin_num] while self.busy: sleep(0.01) self.busy = True self.send_key(keycode) self.busy = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_key_event(self, key, modifiers, mapping):\n if key in mapping:\n for callback in mapping[key]:\n callback()", "def send_key_event(self, event_code):\r\n self.adb.exec_cmd(\"shell input keyevent %s\", event_code)", "def on_key_press(self, key, callback):\n ...
[ "0.6784899", "0.597635", "0.56877905", "0.5675784", "0.5614539", "0.5593338", "0.5532968", "0.55294126", "0.55215394", "0.540672", "0.5382363", "0.53706765", "0.5357638", "0.53537667", "0.53112364", "0.5301527", "0.5283345", "0.52698326", "0.5251863", "0.5233475", "0.52283376...
0.5923485
2
A hook to be overridden by ``InputListener``. Otherwise, prints out key names as soon as they're pressed so is useful for debugging.
def send_key(self, keycode): print(keycode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_press_show_key(key):\n print(f\"{key} pressed\")", "def keyboard(self, *args):\n return _ida_hexrays.Hexrays_Hooks_keyboard(self, *args)", "def keyboard_action(self, event):\n name = event.name\n if len(name) > 1:\n if name == \"space\":\n name = \" \"\n...
[ "0.7189773", "0.66043854", "0.6394544", "0.62418854", "0.6209634", "0.619093", "0.6109771", "0.6083051", "0.59850484", "0.5918823", "0.5909523", "0.58950806", "0.587085", "0.5851221", "0.5839298", "0.57863104", "0.57457864", "0.57444", "0.573886", "0.5733217", "0.5726372", ...
0.53978956
45
Just sets the flag, listener is already running from the very start
def activate(self): self.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_start(self):\n self.state = STARTED", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n ...
[ "0.68543714", "0.67638534", "0.67638534", "0.67638534", "0.67638534", "0.67638534", "0.67638534", "0.67638534", "0.67638534", "0.6648839", "0.66194665", "0.6617628", "0.6617628", "0.65768045", "0.651719", "0.6454057", "0.64307195", "0.6423843", "0.63908666", "0.6331499", "0.6...
0.0
-1
Compute x @ y, broadcasting over the first `N 2` ranks.
def _matmul_broadcast(x, y, name): with tf.variable_scope(name) as scope: return tf.reduce_sum( tf.nn.dropout(x[..., tf.newaxis] * y[..., tf.newaxis, :, :],1), axis=-2 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:...
[ "0.564049", "0.5376758", "0.5373917", "0.53423446", "0.53102696", "0.5265047", "0.5245262", "0.5209597", "0.5205773", "0.51244324", "0.51001847", "0.5094103", "0.5083796", "0.5059849", "0.5039474", "0.50308985", "0.50294924", "0.49719357", "0.49697846", "0.49624175", "0.49606...
0.50320524
15
Wrapper over _get_variable_wrapper() to get weights, with weights decay factor in loss.
def _get_weights_wrapper( name, shape, dtype=tf.float32, initializer=initializers.xavier_initializer(), weights_decay_factor=None ): weights = _get_variable_wrapper( name=name, shape=shape, dtype=dtype, initializer=initializer ) if weights_decay_factor is not None and weights_decay_factor > 0.0: weights_wd = tf.multiply( tf.nn.l2_loss(weights), weights_decay_factor, name=name + '/l2loss' ) tf.add_to_collection('losses', weights_wd) return weights
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _variable_with_weight_decay(self, shape, stddev, wd):\n\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = tf.get_variable('weights', shape=shape,\n initializer=initializer)\n\n# if wd and (not tf.get_variable_scope().reuse):\n# w...
[ "0.77637476", "0.77271247", "0.7695299", "0.75827795", "0.75666004", "0.75396603", "0.7501326", "0.7492685", "0.7492685", "0.74766654", "0.74586254", "0.74359125", "0.73115295", "0.7291328", "0.72828215", "0.72800875", "0.7099408", "0.68967044", "0.67947274", "0.6773664", "0....
0.78818905
0
Wrapper over _get_variable_wrapper() to get bias.
def _get_biases_wrapper( name, shape, dtype=tf.float32, initializer=tf.constant_initializer(0.0) ): biases = _get_variable_wrapper( name=name, shape=shape, dtype=dtype, initializer=initializer ) return biases
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bias(self):", "def get_bias(self):\n return self._bias", "def _get_bias(self) -> JTensor:\n p = self.params\n b = self.local_theta().b\n if p.forget_gate_bias != 0.0:\n b = b + self.get_adjustment()\n\n return b", "def bias_variable(shape):\n initial = tf.constant(0.1, shape=...
[ "0.7734493", "0.73660034", "0.72554916", "0.7221981", "0.72132975", "0.71687686", "0.7135788", "0.7027468", "0.69401133", "0.68844694", "0.681897", "0.67968893", "0.67830056", "0.67830056", "0.67830056", "0.67757607", "0.67674106", "0.67601573", "0.67601573", "0.67601573", "0...
0.63800156
36
Get variables in a triple pattern
def get_vars(triple): return set([v for k, v in triple.items() if v.startswith('?')])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result", "def variables(self):\n return tuple(flatten([a.variables for a in self.args]))", "...
[ "0.65504885", "0.62275475", "0.60280734", "0.58704334", "0.58324313", "0.5800524", "0.5797577", "0.57811517", "0.5696571", "0.56887144", "0.5681551", "0.56710494", "0.5640094", "0.56376356", "0.56341684", "0.56109506", "0.56105363", "0.55877894", "0.5572952", "0.5562894", "0....
0.6889056
0
Find the first pattern in a set of triples pattern connected to a set of variables
def find_connected_pattern(variables, triples): pos = 0 for triple in triples: tripleVars = get_vars(triple['triple']) if len(variables & tripleVars) > 0: return triple, pos, variables | tripleVars pos += 1 return None, None, variables
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first_match(s,patterns):\n\n for p in patterns:\n m=p.match(s)\n if m:\n return p,m\n return None,None", "def __extract_pattern_nodes(graph):\n tp_nodes = graph.subjects(RDF.type, AGORA.TriplePattern)\n for tpn in tp_nodes:\n subject = list(graph.objects(tpn, AGORA.subject)).pop()\n...
[ "0.56770355", "0.54898673", "0.5466495", "0.54459643", "0.5443161", "0.5358336", "0.527935", "0.5254748", "0.525361", "0.5221164", "0.52143013", "0.51818883", "0.51779795", "0.5138044", "0.5081138", "0.5056731", "0.49985862", "0.49973455", "0.4994104", "0.49817485", "0.498029...
0.7778533
0
Find all variables from triple pattern with the same name, and then returns the equality expression + the triple pattern used to evaluate correctly the pattern.
def equality_variables(subject, predicate, obj): if subject == predicate: return "{} = {}".format(subject, predicate + '__2'), (subject, predicate + '__2', obj), "" elif subject == obj: return "{} = {}".format(subject, obj + '__2'), (subject, predicate, obj + '__2') elif predicate == obj: return "{} = {}".format(predicate, obj + '__2'), (subject, predicate, obj + '__2') return None, (subject, predicate, obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_connected_pattern(variables, triples):\n pos = 0\n for triple in triples:\n tripleVars = get_vars(triple['triple'])\n if len(variables & tripleVars) > 0:\n return triple, pos, variables | tripleVars\n pos += 1\n return None, None, variables", "def get_matching(va...
[ "0.5852751", "0.57515645", "0.5609051", "0.5592503", "0.53491706", "0.52757096", "0.5234036", "0.50917244", "0.50880814", "0.5063958", "0.5033563", "0.4941532", "0.4936441", "0.49216276", "0.49144816", "0.49125922", "0.48900846", "0.4889691", "0.48858666", "0.48518804", "0.48...
0.5591187
4
returns the frequency of a tone. formulas from
def tone_to_freq(tone): return math.pow(2, (tone - 69.0) / 12.0) * 440.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tone_frequency(self):\n return self.tone_frequency", "def tone(n, base_freq=440.0):\n # -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 12\n # G G# A A# B C C# D D# E F F# G G# A\n # G Ab A Bb B C Db D Eb E F Gb G Ab A\n return base_freq * 2 ** (n/12)", "def freq():", "de...
[ "0.7913658", "0.77914226", "0.7399102", "0.7215293", "0.7153386", "0.7143531", "0.7114183", "0.70356035", "0.69720876", "0.695965", "0.69546825", "0.6954073", "0.69424343", "0.69303775", "0.6903233", "0.6880722", "0.68684185", "0.6841452", "0.6833891", "0.6807867", "0.6769327...
0.87280464
0
this function adds 5 cards from the deck to the hand
def deal_poker_hand(self, deck): for i in range(5): self.hand.append(deck.drawCard())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_card(self, added_cards):\n\n self.hand[:0] = added_cards", "def add_a_card_to_hand(self, hand, deck):\n hand.append(deck.pop())", "def deal(self, num_cards=7):\n self.deck.shuffle()\n for player in self.players:\n for i in range(num_cards):\n self.h...
[ "0.72352415", "0.7053274", "0.6955573", "0.6879287", "0.6851322", "0.68399423", "0.68385714", "0.6816445", "0.68131894", "0.680666", "0.6777246", "0.67551434", "0.6744151", "0.672975", "0.6721525", "0.67009944", "0.6674014", "0.6656667", "0.6654007", "0.66452503", "0.6627303"...
0.7919288
0
prints all cards in hand
def print_hand(self): for card in self.hand: card.printCard()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_hand(self):\n for card in self.hand:\n print(card)", "def player_show_hand(self):\n for card in self.get_hand():\n print(card.get_card())", "def show_hand(self):\n\n print(f\"{self.name.title()}'s cards are:\")\n for card in self.hand:\n pri...
[ "0.86974907", "0.83651304", "0.8254563", "0.79101205", "0.790326", "0.78862673", "0.7541825", "0.74812996", "0.7473956", "0.7269598", "0.7237259", "0.7206232", "0.72048044", "0.71964407", "0.7182787", "0.7144437", "0.71311176", "0.70919424", "0.6971113", "0.6928701", "0.68750...
0.8903049
0
There are values in the xls that have descriptions in one cell and the value to the left, this function is a helper in those cases
def get_horizontal_field_value(xls, row_index, description_index, fields_count=1, description=None, partial_match=False): if description: actual_description = get_cell_value(xls, row_index, description_index) if not actual_description: raise ValueError("empty cell at coordinate: {}:{}".format(row_index, description_index)) mismatch = False if partial_match: if description not in actual_description: mismatch = True else: if description != actual_description: mismatch = True if mismatch: raise ValueError("Mismatch between expected description and actual description: \"{}\" != \"{}\"" .format(description, actual_description)) output = [] for i in range(1, fields_count + 1, 1): cell_value = get_cell_value(xls, row_index, description_index + i) if cell_value is not None: output.append(cell_value) if not output: return None return ' '.join(v for v in output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['...
[ "0.57297254", "0.54683656", "0.544114", "0.5382319", "0.53744954", "0.5310725", "0.5275922", "0.52674615", "0.5236666", "0.5231945", "0.5164695", "0.51571536", "0.51552546", "0.5144152", "0.5134118", "0.5122181", "0.51046264", "0.51006085", "0.5028648", "0.50078714", "0.50003...
0.62578756
0
r"""Audio Microfrontend Op. This Op converts a sequence of audio data into one or more feature vectors containing filterbanks of the input. The
def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None): _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "AudioMicrofrontend", name, tld.op_callbacks, audio, "sample_rate", sample_rate, "window_size", window_size, "window_step", window_step, "num_channels", num_channels, "upper_band_limit", upper_band_limit, "lower_band_limit", lower_band_limit, "smoothing_bits", smoothing_bits, "even_smoothing", even_smoothing, "odd_smoothing", odd_smoothing, "min_signal_remaining", min_signal_remaining, "enable_pcan", enable_pcan, "pcan_strength", pcan_strength, "pcan_offset", pcan_offset, "gain_bits", gain_bits, "enable_log", enable_log, "scale_shift", scale_shift, "left_context", left_context, "right_context", right_context, "frame_stride", frame_stride, "zero_padding", zero_padding, "out_scale", out_scale, "out_type", out_type) return _result except _core._FallbackException: try: return audio_microfrontend_eager_fallback( audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, 
zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if sample_rate is None: sample_rate = 16000 sample_rate = _execute.make_int(sample_rate, "sample_rate") if window_size is None: window_size = 25 window_size = _execute.make_int(window_size, "window_size") if window_step is None: window_step = 10 window_step = _execute.make_int(window_step, "window_step") if num_channels is None: num_channels = 32 num_channels = _execute.make_int(num_channels, "num_channels") if upper_band_limit is None: upper_band_limit = 7500 upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit") if lower_band_limit is None: lower_band_limit = 125 lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit") if smoothing_bits is None: smoothing_bits = 10 smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits") if even_smoothing is None: even_smoothing = 0.025 even_smoothing = _execute.make_float(even_smoothing, "even_smoothing") if odd_smoothing is None: odd_smoothing = 0.06 odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing") if min_signal_remaining is None: min_signal_remaining = 0.05 min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining") if enable_pcan is None: enable_pcan = False enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan") if pcan_strength is None: pcan_strength = 0.95 pcan_strength = _execute.make_float(pcan_strength, "pcan_strength") if pcan_offset is None: pcan_offset = 80 pcan_offset = _execute.make_float(pcan_offset, "pcan_offset") if gain_bits is None: gain_bits = 21 gain_bits = _execute.make_int(gain_bits, "gain_bits") if enable_log is None: enable_log = True enable_log = _execute.make_bool(enable_log, "enable_log") if scale_shift is None: scale_shift = 6 scale_shift = _execute.make_int(scale_shift, "scale_shift") if left_context is None: left_context = 0 left_context = _execute.make_int(left_context, "left_context") if right_context is None: right_context = 0 right_context = _execute.make_int(right_context, 
"right_context") if frame_stride is None: frame_stride = 1 frame_stride = _execute.make_int(frame_stride, "frame_stride") if zero_padding is None: zero_padding = False zero_padding = _execute.make_bool(zero_padding, "zero_padding") if out_scale is None: out_scale = 1 out_scale = _execute.make_int(out_scale, "out_scale") if out_type is None: out_type = _dtypes.uint16 out_type = _execute.make_type(out_type, "out_type") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "AudioMicrofrontend", audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("sample_rate", 
_op._get_attr_int("sample_rate"), "window_size", _op._get_attr_int("window_size"), "window_step", _op._get_attr_int("window_step"), "num_channels", _op._get_attr_int("num_channels"), "upper_band_limit", _op.get_attr("upper_band_limit"), "lower_band_limit", _op.get_attr("lower_band_limit"), "smoothing_bits", _op._get_attr_int("smoothing_bits"), "even_smoothing", _op.get_attr("even_smoothing"), "odd_smoothing", _op.get_attr("odd_smoothing"), "min_signal_remaining", _op.get_attr("min_signal_remaining"), "enable_pcan", _op._get_attr_bool("enable_pcan"), "pcan_strength", _op.get_attr("pcan_strength"), "pcan_offset", _op.get_attr("pcan_offset"), "gain_bits", _op._get_attr_int("gain_bits"), "enable_log", _op._get_attr_bool("enable_log"), "scale_shift", _op._get_attr_int("scale_shift"), "left_context", _op._get_attr_int("left_context"), "right_context", _op._get_attr_int("right_context"), "frame_stride", _op._get_attr_int("frame_stride"), "zero_padding", _op._get_attr_bool("zero_padding"), "out_scale", _op._get_attr_int("out_scale"), "out_type", _op._get_attr_type("out_type")) _inputs_flat = _op.inputs _execute.record_gradient( "AudioMicrofrontend", _inputs_flat, _attrs, _result) _result, = _result return _result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, audio, feat_kinds=['sp','mcc','f0','ap','en']):\n device = audio.device\n audio = audio.detach().cpu().numpy()\n feat = dict()\n for feat_kind in feat_kinds:\n feat[feat_kind] = list()\n\n for x in audio:\n # Preprocess\n x = x *...
[ "0.6214952", "0.6180612", "0.5916665", "0.5807464", "0.574993", "0.57417107", "0.5674335", "0.5656726", "0.559542", "0.5592476", "0.5391799", "0.53891623", "0.53865993", "0.536583", "0.5343574", "0.53194135", "0.52970415", "0.5287498", "0.52824646", "0.5269765", "0.5267847", ...
0.5350822
14
Show url landing page to allow users to sign in or register
def display_loginpage(): login_form = LoginForm() register_new_user_form = RegisterForm() return render_template('loginpage.html', register_new_user_form = register_new_user_form, login_form = login_form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))", "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def home():\n\n if not current_user.is_a...
[ "0.78786206", "0.76203156", "0.7580162", "0.74616975", "0.74271345", "0.72606593", "0.72573274", "0.72572017", "0.72520787", "0.720815", "0.7148061", "0.7125707", "0.7108026", "0.70950717", "0.7052123", "0.7041136", "0.7038737", "0.7005974", "0.7005249", "0.6992043", "0.69885...
0.67882955
36
Validating entered user info with the DB
def login(): login_form = LoginForm() user = User.query.filter_by(username=login_form.username.data).first() if login_form.validate_on_submit(): if user: if user.password == login_form.password.data: login_user(user) return redirect('/searchpage') else: flash("Sorry, the information you entered is incorrect") return redirect('/')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self):\n\n # Fetch cleaned email and username data.\n email = self.cleaned_data.get('email')\n username = self.cleaned_data.get('username')\n\n # Fetch possible user objects from the database\n # based on provided and email and password.\n user_email = User.objec...
[ "0.71897084", "0.69053745", "0.68623877", "0.68019813", "0.673602", "0.671058", "0.66788834", "0.6658235", "0.6556224", "0.65534216", "0.64900243", "0.6401189", "0.6394674", "0.6372971", "0.63576025", "0.63221467", "0.6296865", "0.6296594", "0.6270587", "0.62569064", "0.61645...
0.0
-1
Adding a new user to the DB
def register_new_user(): register_new_user_form = RegisterForm() user = User.query.filter_by(username=register_new_user_form.username.data).first() email = User.query.filter_by(email = register_new_user_form.email.data).first() if user or email: flash("Sorry, the username or email already exists in the database") return redirect('/') elif register_new_user_form.validate_on_submit(): new_user = User(username=register_new_user_form.username.data, email=register_new_user_form.email.data, password=register_new_user_form.password.data) db.session.add(new_user) db.session.commit() login_user(new_user) return redirect('/searchpage')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def add_new_user_to_db():\n fi...
[ "0.84153813", "0.8296184", "0.8227132", "0.8195087", "0.8158671", "0.8113021", "0.80605036", "0.80300426", "0.8006185", "0.7954521", "0.7898283", "0.78795296", "0.78635365", "0.7859975", "0.78550875", "0.78340864", "0.7817653", "0.7773862", "0.7768065", "0.77598983", "0.77367...
0.0
-1
Displays the searchpage. This is the user's homepage
def display_search_page(): favorite_players = [] favorites = Favorite.query.filter_by(id = current_user.id).all() if len(favorites) > 0: for favorite in favorites: player = get_favorites(favorite.favorited_item) player_info = player[0] favorite_players.append(player_info) else: favorite_players = [] return render_template('searchpage.html', favorite_players = favorite_players)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search():\r\n return render_template(\"/home/search.html\")", "def search_page():\n return render_template('page_query.html', search_label=g_search_type)", "def go_to_search():\n\tuser_id = session.get(\"user_id\")\n\tuser = User.query.filter_by(user_id=user_id).first()\n\n\treturn render_template(\"...
[ "0.8220182", "0.81900704", "0.7612297", "0.7570061", "0.74575865", "0.73217803", "0.72212404", "0.7209977", "0.71231586", "0.708114", "0.70068485", "0.70039576", "0.6972065", "0.6970202", "0.69344836", "0.6908577", "0.68495345", "0.68272036", "0.6818644", "0.6812315", "0.6807...
0.6964854
14
The user clicked to update their favorites. This checks whether or not to remove the athlete in the session as a favorite
def update_favorites(): check_favorite = Favorite.query.filter(Favorite.favorited_item==session["athlete_id"]).first() route = f'/athletes/{session["athlete_id"]}' if check_favorite is None: new_update = Favorite(id=current_user.id, favorited_item=session["athlete_id"]) db.session.add(new_update) else: db.session.delete(check_favorite) db.session.commit() return redirect(route)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)", "def favourite(self, favourite):\n\n self._favourite = f...
[ "0.69176424", "0.688864", "0.68283784", "0.66788083", "0.6618724", "0.65838104", "0.64054716", "0.62992626", "0.6211721", "0.6205134", "0.61849916", "0.61616564", "0.6160226", "0.60770786", "0.60408217", "0.60285735", "0.6025413", "0.60150605", "0.60038364", "0.59932923", "0....
0.7853664
0
Logs out the user
def logout(): logout_user() return redirect('/')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_out_user(self):\n flask_login.logout_user()", "def logout_user():\n pass", "def logout():\n return logout_user()", "def logOut(self):\n self.client.logout()", "def logout():", "def logout():\n login()", "def logout(self):\n self.change_user(self.username, None, Non...
[ "0.8877641", "0.8626286", "0.839422", "0.8392201", "0.8352932", "0.8337815", "0.82495695", "0.8176587", "0.8166211", "0.8124933", "0.81026566", "0.80388916", "0.80171776", "0.800409", "0.79627264", "0.7959641", "0.79488003", "0.79390985", "0.7923234", "0.7908623", "0.7902515"...
0.7743278
43
Run all the parts of this task.
def main(): # delete the database file if it already exists db_path = Path('../../data/db.sqlite') db_path.unlink(missing_ok=True) # create the database with sqlite3.connect(db_path) as connection: create_database(connection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n try:\n self._run()\n except Exception as err:\n # TODO: Do Task Failure to run exception handling\n pass", "def run(self):\n self.run()", "def run(self):\n\n self._get_routes()\n self._calculate_emissions()", "def run(self):...
[ "0.7519389", "0.7507399", "0.7174385", "0.71610844", "0.70846206", "0.706648", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", "0.7065904", ...
0.0
-1
Create tables for orders, delivery, and clicks.
def create_database(connection): cursor = connection.cursor() # create an orders table, dropping some duplicate rows to satisfy the primary key constraint print("Creating orders table ...") cursor.execute(''' CREATE TABLE JD_order_data ( order_ID TEXT NOT NULL CHECK(LENGTH(order_ID) = 10), sku_ID TEXT NOT NULL CHECK(LENGTH(sku_ID) = 10), user_ID TEXT NOT NULL CHECK(LENGTH(user_ID) = 10), order_time DATETIME NOT NULL, quantity INT NOT NULL, final_unit_price REAL NOT NULL, PRIMARY KEY (order_ID, sku_ID) ) ''') orders = pd.read_csv('../../data/JD_order_data.csv', low_memory=False) orders = orders[['order_ID', 'sku_ID', 'user_ID', 'order_time', 'quantity', 'final_unit_price']] orders = orders.groupby(['order_ID', 'sku_ID'], as_index=False).first() orders.to_sql('JD_order_data', connection, index=False, if_exists='append') cursor.execute('CREATE INDEX orders_user_index ON JD_order_data (user_ID)') # create a delivery table print("Creating delivery table ...") cursor.execute(''' CREATE TABLE JD_delivery_data ( order_ID TEXT NOT NULL CHECK(LENGTH(order_ID) = 10), package_ID TEXT NOT NULL CHECK(LENGTH(package_ID) = 10), ship_out_time DATETIME NOT NULL, PRIMARY KEY (order_ID, package_ID), FOREIGN KEY (order_ID) REFERENCES JD_order_data (order_ID) ) ''') delivery = pd.read_csv('../../data/JD_delivery_data.csv', parse_dates=['ship_out_time']) delivery = delivery[['order_ID', 'package_ID', 'ship_out_time']] delivery.to_sql('JD_delivery_data', connection, index=False, if_exists='append') # create a clicks table print("Creating clicks table ...") cursor.execute(''' CREATE TABLE JD_click_data ( user_ID TEXT NOT NULL CHECK(LENGTH(user_ID) = 10), sku_ID TEXT NOT NULL CHECK(LENGTH(sku_ID) = 10), request_time DATETIME NOT NULL, FOREIGN KEY (user_ID) REFERENCES JD_order_data (user_ID), FOREIGN KEY (sku_ID) REFERENCES JD_order_data (sku_ID) ) ''') clicks = pd.read_csv('../../data/JD_click_data.csv', parse_dates=['request_time']) clicks = clicks[clicks['user_ID'] != '-'] clicks = 
clicks[['user_ID', 'sku_ID', 'request_time']] clicks.to_sql('JD_click_data', connection, index=False, if_exists='append') cursor.execute('CREATE INDEX clicks_user_index ON JD_click_data (user_ID)') cursor.execute('CREATE INDEX clicks_sku_index ON JD_click_data (sku_ID)') # Create a user table print("Creating users table ...") cursor.execute(''' CREATE TABLE JD_user_data ( user_ID TEXT NOT NULL CHECK(LENGTH(user_ID) = 10), plus INT NOT NULL CHECK (plus IN (0, 1)), PRIMARY KEY (user_ID) ) ''') users = pd.read_csv('../../data/JD_user_data.csv', low_memory=False) users = users[['user_ID', 'plus']] users = users.groupby(['user_ID'], as_index=False).first() users.to_sql('JD_user_data', connection, index=False, if_exists='append') cursor.execute('CREATE INDEX users_user_index ON JD_user_data (user_ID)')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def create_tables(self):\n ...
[ "0.752659", "0.73547345", "0.69034", "0.6883073", "0.6853905", "0.68315214", "0.67800254", "0.6657214", "0.6652979", "0.65973955", "0.65843534", "0.6528014", "0.65207535", "0.6505119", "0.6502627", "0.64933836", "0.6487306", "0.6475528", "0.6457907", "0.64303106", "0.64230615...
0.0
-1
Add single hopping to hopping matrix.
def set_element(self, rel_unit_cell, element, hop): self.dict[element[0]][rel_unit_cell + (element[1],)] = hop
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addNeighbor(self, neighbor):", "def add_neighbor(self, neighbor):\r\n self.neighbors.append(neighbor)", "def add_neighbor(self, cell):\n self.__neighbors.append(cell)", "def __heappush(heap, nodes, node):\n pos = len(heap)\n heap.append(node)\n nodes[node[1]] = pos\n ...
[ "0.6541676", "0.5901997", "0.5716449", "0.5625333", "0.554359", "0.5538659", "0.5529868", "0.5503152", "0.54820204", "0.5470297", "0.5470127", "0.5427927", "0.54236084", "0.54061526", "0.5405852", "0.5400103", "0.53873384", "0.5377393", "0.5375697", "0.536957", "0.5349497", ...
0.5005826
43
Adds hopping conjugates to self.dict.
def add_conjugates(self): # declare new dict self.new_dict = copy.deepcopy(self.dict) # iterate over items for i in range(len(self.dict)): for rel_tag, hopping in self.dict[i].items(): x, y, z, j = rel_tag reverse_tag = (-x, -y, -z, i) reverse_hopping = np.conjugate(np.transpose(hopping)) if reverse_tag not in self.new_dict[j]: self.new_dict[j][reverse_tag] = reverse_hopping # done self.dict = self.new_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1", "def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matt...
[ "0.5047579", "0.5037985", "0.502265", "0.5007653", "0.49975562", "0.49723896", "0.49697816", "0.48349583", "0.48268276", "0.48259962", "0.48032284", "0.47840226", "0.4778364", "0.47758386", "0.47497863", "0.4746505", "0.47428873", "0.4741161", "0.47241557", "0.47111377", "0.4...
0.65367895
0
Add a gaussian noise to to the result of Dense layer. The added gaussian noise is not related to the origin input.
def __init__(self, hidden_units, act_fn=activation_fn, output_shape=1, out_activation=None, out_layer=True): super().__init__() for u in hidden_units: self.add(GaussianNoise(0.4)) # Or use kwargs self.add(Dense(u, act_fn)) if out_layer: self.add(GaussianNoise(0.4)) self.add(Dense(output_shape, out_activation))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_gaussian_noise(x, std):\n noise = x.new_zeros(x.size()).normal_(std=std)\n return x + noise", "def add_gaussian_noise(X, mu=0, sigma=0.1):\n noise = np.random.normal(0.0, sigma, size=X.size)\n return X + noise.reshape(X.shape)", "def add_gaussian_noise(self, samples):\n\n if 'sigma' ...
[ "0.7656182", "0.7478594", "0.71226805", "0.7062763", "0.7007418", "0.69041336", "0.66500264", "0.66093963", "0.65894604", "0.65828973", "0.65525967", "0.6540314", "0.65288055", "0.65264595", "0.64818", "0.64335525", "0.6431225", "0.6427232", "0.64240474", "0.6390431", "0.6390...
0.0
-1
Shift input ids one token to the right, and wrap the last non pad token (usually ).
def shift_tokens_right(self, input_ids, pad_token_id): prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_output_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_tokens_right(input_ids, pad_token_id):\r\n prev_output_tokens = input_ids.clone()\r\n index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)\r\n prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()\r\n prev_output_tokens[:, 1:] = input_ids[:, :-1]\r\n re...
[ "0.8174382", "0.7355779", "0.730172", "0.62766033", "0.5987434", "0.59027076", "0.5735923", "0.5567176", "0.5503809", "0.54715633", "0.54715633", "0.54715633", "0.5460714", "0.54170334", "0.5338661", "0.53169227", "0.52875674", "0.52819985", "0.5207631", "0.51908386", "0.5179...
0.81405616
1
Visualizes in a pyplot window an image and a label pair from provided paths. For reading files, Pillow is used so all paths and formats must be Pillowcompatible. The task definition is used to define colors for label ids (see panoptic_parts/utils/defs/template_v1.0.yaml).
def visualize_from_paths(image_path, label_path, task_def_path): # sid2color is a mapping from all possible sids to colors with open(task_def_path) as fp: task_def = yaml.load(fp, Loader=yaml.Loader) sid2color = task_def['sid2color'] # add colors for all sids that may exist in labels, but don't have a color from task_def sid2color.update({sid: sid2color[-1] # we use the void color here for sid in range(task_def['max_sid']) if not (sid in task_def['valid_sids'] or sid in sid2color)}) # reduce resolution for faster execution image = Image.open(image_path) label = Image.open(label_path) uids = np.array(label, dtype=np.int32) # optionally transform parts ids # here we trasform the pids from the original dataset to another set of pids according # to sid2pids_groups, where parts for some scene-level semantic classes are grouped # TODO(panos): consider moving this functionality to colorize_label if 'sid2pids_groups' in task_def.keys(): uids = _transform_uids(uids, task_def['max_sid'], task_def['sid2pids_groups']) # create the colorized label images uids_sem_inst_parts_colored, uids_sem_colored, uids_sem_inst_colored = \ experimental_colorize_label(uids, sid2color=sid2color, return_sem=True, return_sem_inst=True, emphasize_instance_boundaries=True) # plot # initialize figure for plotting _, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2) # for ax in axes: # ax.set_axis_off() ax1.imshow(image) ax1.set_title('image') ax2.imshow(uids_sem_colored) ax2.set_title('labels colored on semantic level') ax3.imshow(uids_sem_inst_colored) ax3.set_title('labels colored on semantic and instance levels') ax4.imshow(uids_sem_inst_parts_colored) ax4.set_title('labels colored on semantic, instance, and parts levels') plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_oneshot_task(pairs):\n fig,(ax1,ax2) = plt.subplots(2)\n ax1.matshow(pairs[0][0].reshape(300,300),cmap='gray')\n img = concat_images(pairs[1])\n ax1.get_yaxis().set_visible(False)\n ax1.get_xaxis().set_visible(False)\n ax2.matshow(img,cmap='gray')\n plt.xticks([])\n plt.yticks([])\...
[ "0.62449753", "0.5956047", "0.5905638", "0.5834986", "0.5807051", "0.57767344", "0.5769278", "0.5720147", "0.5714393", "0.5708018", "0.55924964", "0.55918765", "0.55361027", "0.55186796", "0.5489448", "0.5472695", "0.54674464", "0.54264355", "0.5421594", "0.54154986", "0.5411...
0.80462617
0
Computes top principal component.
def pc1(d): import numpy as np from sklearn.decomposition import TruncatedSVD # Normalize data t1 = d.T t1 = t1 - t1.mean(axis=0) t1 = t1 / (np.sqrt((t1**2).mean(axis=0)) + 1E-200) t0 = TruncatedSVD(n_components=1) t1 = t0.fit_transform(t1).T.astype(d.dtype, copy=False).ravel() assert t1.shape == (d.shape[1], ) return t1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first_principal_component(X):\n guess = [1 for _ in X[0]]\n unscaled_maximizer = maximize_batch(\n partial(directional_variance, X),\n partial(directional_variance_gradient, X),\n guess)\n...
[ "0.6089231", "0.5806278", "0.5718602", "0.5631194", "0.555383", "0.5522287", "0.55130035", "0.5443612", "0.54245883", "0.53624135", "0.53293145", "0.53293145", "0.53293145", "0.5287979", "0.5231421", "0.5225504", "0.5204582", "0.51921564", "0.5179744", "0.5143214", "0.511822"...
0.0
-1
Converts a pandas series of string to numeric ratios.
def toratio(s): from operator import truediv return s.apply(lambda x: truediv(*[int(y) for y in x.split('/')]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_float(series):\n series = series.str[:-4].str.replace('.', '').str.replace(',', '.').astype(float)\n return series", "def convert(srat):\n try:\n return float(srat)\n except ValueError:\n num, denom = srat.split('/')\n return float(num) / float(denom)", "def conv...
[ "0.6325216", "0.6113206", "0.58220035", "0.5687025", "0.567426", "0.56450087", "0.5632027", "0.5590445", "0.5578296", "0.5572146", "0.5564546", "0.5494868", "0.54813343", "0.5451129", "0.54271585", "0.5381694", "0.5367929", "0.5367361", "0.5344174", "0.5337416", "0.5271317", ...
0.48489207
58
Finds GO enrichment with goatools (0.7.11 tested).
def goe( genelist, go_file, goa_file, bg=None, nmin=5, conversion=None, evidence_set={ 'EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'HTP', 'HDA', 'HMP', 'HGI', 'IBA', 'IBD', 'IKR', 'IRD', 'ISS', 'ISO', 'ISA', 'ISM'}): from tempfile import NamedTemporaryFile from os import linesep from goatools.go_enrichment import GOEnrichmentStudy from goatools.obo_parser import GODag from goatools.associations import read_gaf from collections import defaultdict import itertools from biothings_client import get_client import pandas as pd import logging assert type(genelist) is list and len(genelist) > 0 if nmin < 1: nmin = 1 bg0 = bg # Convert gene names if conversion is not None: assert len(conversion) == 3 name_from, name_to, species = conversion mg = get_client('gene') ans = set(genelist) if bg is not None: t1 = set(bg) assert len(ans - t1) == 0 ans |= t1 ans = list(ans) ans = mg.querymany(ans, scopes=name_from, fields=name_to, species=species) t1 = set(['query', '_score', name_to.split('.')[0]]) ans = list(filter(lambda x: len(t1 - set(x)) == 0, ans)) ans = sorted(ans, key=lambda x: x['_score']) convert = {x['query']: x for x in ans} for xi in name_to.split('.'): convert = filter(lambda x: xi in x[1], convert.items()) convert = {x[0]: x[1][xi] for x in convert} convert = { x[0]: x[1] if type(x[1]) is str else x[1][0] for x in convert.items()} genelist2 = list( set([convert[x] for x in filter(lambda x: x in convert, genelist)])) if bg is not None: bg = list(set([convert[x] for x in filter(lambda x: x in convert, bg)])) t1 = set(genelist) converti = list(filter(lambda x: x[0] in t1, convert.items())) t1 = defaultdict(list) for xi in converti: t1[xi[1]].append(xi[0]) converti = dict(t1) t1 = defaultdict(list) for xi in convert.items(): t1[xi[1]].append(xi[0]) convertia = dict(t1) else: genelist2 = genelist # Load GO DAG and association files logging.debug('Reading GO DAG file ' + go_file) godag = GODag(go_file) logging.debug('Reading GO association file ' + goa_file) goa = 
read_gaf(goa_file, evidence_set=evidence_set) if bg is None: bg = list(goa.keys()) # Compute enrichment goe = GOEnrichmentStudy(bg, goa, godag) ans = goe.run_study(genelist2) # Format output with NamedTemporaryFile() as f: goe.wr_tsv(f.name, ans) ans = f.read() ans = ans.decode() ans = [x.split('\t') for x in ans.split(linesep)] if len(ans[-1]) < 2: ans = ans[:-1] if len(ans) == 0 or len(ans[0]) == 0: raise ValueError('No enrichment found. Check your input ID type.') ans[0][0] = ans[0][0].strip('# ') ans = pd.DataFrame(ans[1:], columns=ans[0]) ans.drop(['NS', 'enrichment', 'study_count', 'p_sidak', 'p_holm'], axis=1, inplace=True) for xj in ['p_uncorrected', 'p_bonferroni']: ans[xj] = pd.to_numeric(ans[xj], errors='raise') ans['depth'] = pd.to_numeric(ans['depth'], errors='raise', downcast='unsigned') # Odds ratio column and sort column ans['odds_ratio'] = toratio(ans['ratio_in_study']) / toratio( ans['ratio_in_pop']) ans = ans[[ 'name', 'depth', 'p_uncorrected', 'p_bonferroni', 'odds_ratio', 'ratio_in_study', 'ratio_in_pop', 'GO', 'study_items']] ans['study_items'] = ans['study_items'].apply(lambda x: x.replace(' ', '')) # Convert back study_items if conversion is not None: ans['study_items'] = ans['study_items'].apply(lambda x: ','.join( list(itertools.chain.from_iterable([converti[y] for y in x.split(',')]))) if len(x) > 0 else x) ans.sort_values('p_uncorrected', inplace=True) # Get top enriched GO by P-value gotop = ans[ (ans['odds_ratio'] > 1) & ans['ratio_in_study'].apply(lambda x: int(x.split('/')[0]) >= nmin)] if len(gotop) == 0: raise ValueError('No GO enrichment found for given criteria.') gotop = str(gotop.iloc[0]['GO']) if bg0 is not None: # Children GOs gos = set([gotop] + list(godag.query_term(gotop).get_all_children())) # Look for genes genes = list( filter(lambda x: len(list(filter(lambda y: y in gos, goa[x]))) > 0, goa)) if conversion is not None: genes = [convertia[x] for x in filter(lambda x: x in convertia, genes)] genes = 
list(set(list(itertools.chain.from_iterable(genes)))) genes = set(genes) genes = list(filter(lambda x: x in genes, bg0)) else: genes = None return (ans, gotop, genes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_functional_enrichment(enrichment_file, go, remove_parents=False, only_biological_processes=False, only_slim=False, logodds_cutoff=0):\n\n #from toolbox import OboParser\n #g=OboParser.getOboGraph(\"/home/emre/arastirma/celldiff/data/GO/gene_ontology.1_2.obo\")\n g=go\n\n go_terms = None\n na...
[ "0.62130064", "0.5718101", "0.53815275", "0.53254366", "0.5073769", "0.50681895", "0.49845618", "0.49842817", "0.4910564", "0.48540446", "0.4827082", "0.47912404", "0.47549713", "0.47513545", "0.47407323", "0.47222322", "0.47175512", "0.47109953", "0.47066993", "0.47058165", ...
0.4953663
8
Finds the top variable GO enrichment of top principal genes in the binary coexpression network. Principal genes are those with most coexpressed genes. They reflect the most variable pathways in the dataset. When the variable pathways are housekeeping related, they may conceal celltypespecific coexpression patterns from being observed and understood. This function identifies the most variable pathway with gene ontology enrichment study of the top principal genes. Background genes are all genes provided.
def gotop(net, namet, go_file, goa_file, n=100, **ka): import numpy as np nt = len(namet) if net.shape != (nt, nt) or nt <= 1: raise ValueError('Wrong shape for net or namet.') if n <= 1 or n >= nt: raise ValueError( 'Number of principal genes must be from 1 to the number of all genes (exclusive).' ) # Find principal genes t1 = net.sum(axis=1) t2 = t1[t1.argsort()[::-1][n]] if t2 == 0: raise RuntimeError('Not enough principal genes that have co-expression') t2 = np.nonzero(t1 >= t2)[0] assert len(t2) >= n assert len(t2) < nt t1 = [str(x) for x in namet[t2]] t2 = [str(x) for x in namet] return tuple([t1] + list(goe(t1, go_file, goa_file, bg=t2, **ka)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetTopConfidenceNodes(g, dia, topn=20):\r\n conf_hf = {}\r\n for i in g.nodes(data=True):\r\n n, distr = i\r\n if (distr['class_distribution']['No Diagnosis'] != 0):\r\n conf_hf[n] = distr['class_distribution']['Diagnosis'] / (\r\n distr['class_distribution...
[ "0.5159819", "0.5115306", "0.504334", "0.48993444", "0.48605645", "0.48025545", "0.47738016", "0.4770416", "0.47697255", "0.4766613", "0.4763854", "0.4754322", "0.47231025", "0.47138077", "0.4684063", "0.46748742", "0.4669275", "0.46637076", "0.46562827", "0.46556216", "0.465...
0.0
-1